Merge "Move data directory under doc/source"
diff --git a/HACKING.rst b/HACKING.rst
index cb9821e..79ebc4d 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -128,6 +128,12 @@
Test class level resources should be defined in the `resource_setup` method of
the test class, except for any credential obtained from the credentials
provider, which should be set-up in the `setup_credentials` method.
+Cleanup is best scheduled using `addClassResourceCleanup`, which ensures that
+the cleanup code is always invoked, and in reverse order with respect to the
+creation order.
+
+In both cases - test level and class level cleanups - resources that are
+deleted asynchronously should have a wait loop scheduled before the actual
+delete, so that the wait runs after the delete once the cleanups are
+executed in reverse order.
The test base class `BaseTestCase` defines Tempest framework for class level
fixtures. `setUpClass` and `tearDownClass` are defined here and cannot be
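
Note: a minimal sketch of the test level variant of the pattern described in
the hunk above, mirroring the addCleanup calls introduced elsewhere in this
change; the test method, its class and the server creation arguments are
illustrative only::

    from tempest.common import waiters
    from tempest.lib.common.utils import test_utils

    # Inside a compute API test class that provides create_test_server
    # and servers_client.
    def test_something(self):
        server = self.create_test_server(wait_until='ACTIVE')
        # Scheduled first, executed last: wait for the asynchronous
        # delete to actually complete.
        self.addCleanup(waiters.wait_for_server_termination,
                        self.servers_client, server['id'])
        # Scheduled second, executed first: issue the delete, ignoring
        # a 404 if the server is already gone.
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.servers_client.delete_server, server['id'])
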
@@ -380,7 +386,7 @@
Otherwise the bug fix won't be able to land in the project.
Handily, `Zuul’s cross-repository dependencies
-<https://docs.openstack.org/infra/zuul/gating.html#cross-repository-dependencies>`_.
+<https://docs.openstack.org/infra/zuul/user/gating.html#cross-project-dependencies>`_
can be leveraged to do without step 2 and to have steps 3 and 4 happen
"atomically". To do that, make the patch written in step 1 to depend (refer to
Zuul's documentation above) on the patch written in step 4. The commit message
diff --git a/README.rst b/README.rst
index 2e13fec..17d4cba 100644
--- a/README.rst
+++ b/README.rst
@@ -3,7 +3,7 @@
========================
.. image:: http://governance.openstack.org/badges/tempest.svg
- :target: http://governance.openstack.org/reference/tags/index.html
+ :target: https://governance.openstack.org/tc/reference/tags/index.html
.. Change things from this point on
diff --git a/doc/source/library.rst b/doc/source/library.rst
index a461a0f..074d642 100644
--- a/doc/source/library.rst
+++ b/doc/source/library.rst
@@ -69,3 +69,4 @@
library/auth
library/clients
library/credential_providers
+ library/validation_resources
diff --git a/doc/source/library/credential_providers.rst b/doc/source/library/credential_providers.rst
index f4eb37d..d96c97a 100644
--- a/doc/source/library/credential_providers.rst
+++ b/doc/source/library/credential_providers.rst
@@ -130,19 +130,18 @@
# role
provider.clear_creds()
-API Reference
-=============
-------------------------------
+API Reference
+-------------
+
The dynamic credentials module
-------------------------------
+''''''''''''''''''''''''''''''
.. automodule:: tempest.lib.common.dynamic_creds
:members:
---------------------------------------
The pre-provisioned credentials module
---------------------------------------
+''''''''''''''''''''''''''''''''''''''
.. automodule:: tempest.lib.common.preprov_creds
:members:
diff --git a/doc/source/library/validation_resources.rst b/doc/source/library/validation_resources.rst
new file mode 100644
index 0000000..9b36476
--- /dev/null
+++ b/doc/source/library/validation_resources.rst
@@ -0,0 +1,11 @@
+.. _validation_resources:
+
+Validation Resources
+====================
+
+-------------------------------
+The validation_resources module
+-------------------------------
+
+.. automodule:: tempest.lib.common.validation_resources
+ :members:
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index aec55e9..5a2876e 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -59,10 +59,16 @@
* setup_clients
* resource_setup
-which is executed in that order. An example of a TestCase which defines all
+which is executed in that order. Cleanup of resources provisioned during
+resource_setup must be scheduled right after provisioning, using the
+addClassResourceCleanup helper. The resource cleanups stacked this way
+are executed in reverse order during tearDownClass, before the cleanup of
+test credentials takes place. An example of a TestCase which defines all
of these would be::
-
+
+ from tempest.common import waiters
from tempest import config
+ from tempest.lib.common.utils import test_utils
from tempest import test
CONF = config.CONF
@@ -111,6 +117,13 @@
"""
super(TestExampleCase, cls).resource_setup()
cls.shared_server = cls.servers_client.create_server(...)
+ cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+ cls.servers_client,
+ cls.shared_server['id'])
+            cls.addClassResourceCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                cls.servers_client.delete_server,
+                cls.shared_server['id'])
.. _credentials:
diff --git a/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml b/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml
new file mode 100644
index 0000000..7814f4e
--- /dev/null
+++ b/releasenotes/notes/add-validation-resources-to-lib-dc2600c4324ca4d7.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Add the `validation_resources` module to tempest.lib. The module provides
+ a set of helpers that can be used to provision and cleanup all the
+ resources required to perform ping / ssh tests against a virtual machine:
+ a keypair, a security group with targeted rules and a floating IP.
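
Note: as a usage reference for the new library module, a sketch adapted from
the docstring examples of the tempest.common.validation_resources module that
is removed later in this change, assuming the tempest.lib module keeps the
same interface; the cloud URL, credentials and network id are placeholders::

    from tempest.lib import auth
    from tempest.lib.common import validation_resources as vr
    from tempest.lib.services import clients

    creds = auth.get_credentials('http://mycloud/identity/v3',
                                 username='me', project_name='me',
                                 password='secret', domain_name='Default')
    osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
    # Request a keypair and a floating IP to validate a server over ssh
    resources = dict(keypair=True, security_group=False,
                     security_group_rules=False, floating_ip=True)
    resources = vr.create_validation_resources(
        osclients, use_neutron=True,
        floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
        **resources)
    # The floating IP to be attached to the VM
    floating_ip = resources['floating_ip']['ip']
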
diff --git a/tempest/api/compute/admin/test_auto_allocate_network.py b/tempest/api/compute/admin/test_auto_allocate_network.py
index 0c80252..a9772c4 100644
--- a/tempest/api/compute/admin/test_auto_allocate_network.py
+++ b/tempest/api/compute/admin/test_auto_allocate_network.py
@@ -143,6 +143,8 @@
test_utils.call_and_ignore_notfound_exc(
cls.networks_client.delete_network, network['id'])
+ super(AutoAllocateNetworkTest, cls).resource_cleanup()
+
@decorators.idempotent_id('5eb7b8fa-9c23-47a2-9d7d-02ed5809dd34')
def test_server_create_no_allocate(self):
"""Tests that no networking is allocated for the server."""
@@ -175,9 +177,11 @@
_, servers = compute.create_test_server(
self.os_primary, networks='auto', wait_until='ACTIVE',
min_count=3)
- server_nets = set()
for server in servers:
self.addCleanup(self.delete_server, server['id'])
+
+ server_nets = set()
+ for server in servers:
# get the server ips
addresses = self.servers_client.list_addresses(
server['id'])['addresses']
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 66bedd9..08b2d19 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -17,8 +17,10 @@
from tempest.api.compute import base
from tempest.common.utils.linux import remote_client
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
@@ -35,12 +37,6 @@
super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
cls.client = cls.servers_client
- @classmethod
- def resource_setup(cls):
- cls.set_validation_resources()
-
- super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()
-
@decorators.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
@testtools.skipUnless(CONF.validation.run_validation,
'Instance validation tests are disabled.')
@@ -67,20 +63,30 @@
admin_pass = self.image_ssh_password
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
server_no_eph_disk = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_no_eph_disk_id)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server_no_eph_disk['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server,
+ server_no_eph_disk['id'])
+
# Get partition number of server without ephemeral disk.
server_no_eph_disk = self.client.show_server(
server_no_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server_no_eph_disk),
+ self.get_server_ip(server_no_eph_disk,
+ validation_resources),
self.ssh_user,
admin_pass,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server_no_eph_disk,
servers_client=self.client)
disks_num = len(linux_client.get_disks().split('\n'))
@@ -90,17 +96,25 @@
server_with_eph_disk = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE',
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server_with_eph_disk['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server,
+ server_with_eph_disk['id'])
+
server_with_eph_disk = self.client.show_server(
server_with_eph_disk['id'])['server']
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server_with_eph_disk),
+ self.get_server_ip(server_with_eph_disk,
+ validation_resources),
self.ssh_user,
admin_pass,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server_with_eph_disk,
servers_client=self.client)
disks_num_eph = len(linux_client.get_disks().split('\n'))
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 746f83a..1a31723 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -117,15 +117,12 @@
cls.image_ssh_user = CONF.validation.image_ssh_user
cls.image_ssh_password = CONF.validation.image_ssh_password
cls.servers = []
- cls.images = []
cls.security_groups = []
cls.server_groups = []
cls.volumes = []
@classmethod
def resource_cleanup(cls):
- cls.clear_resources('images', cls.images,
- cls.compute_images_client.delete_image)
cls.clear_servers()
cls.clear_resources('security groups', cls.security_groups,
cls.security_groups_client.delete_security_group)
@@ -190,7 +187,7 @@
@classmethod
def create_test_server(cls, validatable=False, volume_backed=False,
- **kwargs):
+ validation_resources=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
@@ -200,6 +197,10 @@
:param validatable: Whether the server will be pingable or sshable.
:param volume_backed: Whether the instance is volume backed or not.
+ :param validation_resources: Dictionary of validation resources as
+ returned by `get_class_validation_resources`.
+ :param kwargs: Extra arguments are passed down to the
+ `compute.create_test_server` call.
"""
if 'name' not in kwargs:
kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server")
@@ -216,7 +217,7 @@
body, servers = compute.create_test_server(
cls.os_primary,
validatable,
- validation_resources=cls.validation_resources,
+ validation_resources=validation_resources,
tenant_network=tenant_network,
volume_backed=volume_backed,
**kwargs)
@@ -293,7 +294,9 @@
image = cls.compute_images_client.create_image(server_id, name=name,
**kwargs)
image_id = data_utils.parse_image_id(image.response['location'])
- cls.images.append(image_id)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.compute_images_client.delete_image,
+ image_id)
if wait_until is not None:
try:
@@ -326,13 +329,33 @@
@classmethod
def rebuild_server(cls, server_id, validatable=False, **kwargs):
- # Destroy an existing server and creates a new one
+ """Destroy an existing class level server and creates a new one
+
+ Some test classes use a test server that can be used by multiple
+ tests. This is done to optimise runtime and test load.
+ If something goes wrong with the test server, it can be rebuilt
+ using this helper.
+
+ This helper can also be used for the initial provisioning if no
+ server_id is specified.
+
+ :param server_id: UUID of the server to be rebuilt. If None is
+ specified, a new server is provisioned.
+        :param validatable: whether the server needs to be
+ validatable. When True, validation resources are acquired via
+ the `get_class_validation_resources` helper.
+        :param kwargs: extra parameters are passed through to the
+ `create_test_server` call.
+ :return: the UUID of the created server.
+ """
if server_id:
cls.delete_server(server_id)
cls.password = data_utils.rand_password()
server = cls.create_test_server(
validatable,
+ validation_resources=cls.get_class_validation_resources(
+ cls.os_primary),
wait_until='ACTIVE',
adminPass=cls.password,
**kwargs)
@@ -363,14 +386,23 @@
cls._delete_volume(cls.volumes_client, volume_id)
@classmethod
- def get_server_ip(cls, server):
+ def get_server_ip(cls, server, validation_resources=None):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
+
+ :param server: The server dict as returned by the API
+ :param validation_resources: The dict of validation resources
+ provisioned for the server.
"""
if CONF.validation.connect_method == 'floating':
- return cls.validation_resources['floating_ip']['ip']
+ if validation_resources:
+ return validation_resources['floating_ip']['ip']
+ else:
+ msg = ('When validation.connect_method equals floating, '
+ 'validation_resources cannot be None')
+ raise exceptions.InvalidParam(invalid_param=msg)
elif CONF.validation.connect_method == 'fixed':
addresses = server['addresses'][CONF.validation.network_for_ssh]
for address in addresses:
@@ -495,12 +527,10 @@
def get_host_other_than(self, server_id):
source_host = self.get_host_for_server(server_id)
- list_hosts_resp = self.os_admin.hosts_client.list_hosts()['hosts']
- hosts = [
- host_record['host_name']
- for host_record in list_hosts_resp
- if host_record['service'] == 'compute'
- ]
+ hypers = self.os_admin.hypervisor_client.list_hypervisors(
+ )['hypervisors']
+ hosts = [hyper['hypervisor_hostname'] for hyper in hypers
+ if hyper['state'] == 'up' and hyper['status'] == 'enabled']
for target_host in hosts:
if source_host != target_host:
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 8d503dc..b497626 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -20,6 +20,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
@@ -70,7 +71,9 @@
body = cls.glance_client.create_image(**params)
body = body['image'] if 'image' in body else body
cls.image_id = body['id']
- cls.images.append(cls.image_id)
+ cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+ cls.glance_client.delete_image,
+ cls.image_id)
image_file = six.BytesIO((b'*' * 1024))
if CONF.image_feature_enabled.api_v1:
cls.glance_client.update_image(cls.image_id, data=image_file)
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index 5987d39..e62e25e 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -74,7 +74,6 @@
# Verify the image was deleted correctly
self.client.delete_image(image['id'])
- self.images.remove(image['id'])
self.client.wait_for_resource_deletion(image['id'])
@decorators.idempotent_id('3b7c6fe4-dfe7-477c-9243-b06359db51e6')
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index cf32ba3..7ecfa0a 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -107,7 +107,6 @@
image_id = data_utils.parse_image_id(image.response['location'])
self.client.delete_image(image_id)
- self.images.remove(image_id)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('084f0cbc-500a-4963-8a4e-312905862581')
@@ -130,6 +129,5 @@
# Do not wait, attempt to delete the image, ensure it's successful
self.client.delete_image(image_id)
- self.images.remove(image_id)
self.assertRaises(lib_exc.NotFound,
self.client.show_image, image_id)
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index acc8b3e..d83d8df 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -23,6 +23,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
@@ -74,7 +75,10 @@
body = cls.glance_client.create_image(**params)
body = body['image'] if 'image' in body else body
image_id = body['id']
- cls.images.append(image_id)
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ cls.compute_images_client.delete_image,
+ image_id)
# Wait 1 second between creation and upload to ensure a delta
# between created_at and updated_at.
time.sleep(1)
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index eeb423e..62d5bea 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -52,8 +52,7 @@
self.client.wait_for_resource_deletion(sg['id'])
# Now check if all the created Security Groups are deleted
fetched_list = self.client.list_security_groups()['security_groups']
- deleted_sgs = \
- [sg for sg in security_group_list if sg in fetched_list]
+ deleted_sgs = [sg for sg in security_group_list if sg in fetched_list]
self.assertFalse(deleted_sgs,
"Failed to delete Security Group %s "
"list" % ', '.join(m_group['name']
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index e0a1d77..0248c65 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -103,7 +103,6 @@
['interfaceAttachment'])
iface = waiters.wait_for_interface_status(
self.interfaces_client, server['id'], iface['port_id'], 'ACTIVE')
- self._check_interface(iface)
return iface
def _test_create_interface_by_network_id(self, server, ifs):
@@ -190,7 +189,6 @@
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
- self._check_interface(ifs[0])
try:
iface = self._test_create_interface(server)
@@ -228,7 +226,6 @@
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
self.assertGreater(interface_count, 0)
- self._check_interface(ifs[0])
network_id = ifs[0]['net_id']
self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
# Remove the fixed IP from server.
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index d8ce7ea..c660821 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -42,8 +42,9 @@
@classmethod
def resource_setup(cls):
- cls.set_validation_resources()
super(ServersTestJSON, cls).resource_setup()
+ validation_resources = cls.get_class_validation_resources(
+ cls.os_primary)
cls.meta = {'hello': 'world'}
cls.accessIPv4 = '1.1.1.1'
cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
@@ -52,6 +53,7 @@
disk_config = cls.disk_config
server_initial = cls.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE',
name=cls.name,
metadata=cls.meta,
@@ -105,11 +107,13 @@
# Verify that the number of vcpus reported by the instance matches
# the amount stated by the flavor
flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
linux_client = remote_client.RemoteClient(
- self.get_server_ip(self.server),
+ self.get_server_ip(self.server, validation_resources),
self.ssh_user,
self.password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=self.server,
servers_client=self.client)
output = linux_client.exec_command('grep -c ^processor /proc/cpuinfo')
@@ -120,11 +124,13 @@
'Instance validation tests are disabled.')
def test_host_name_is_same_as_server_name(self):
# Verify the instance host name is the same as the server name
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
linux_client = remote_client.RemoteClient(
- self.get_server_ip(self.server),
+ self.get_server_ip(self.server, validation_resources),
self.ssh_user,
self.password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=self.server,
servers_client=self.client)
hostname = linux_client.exec_command("hostname").rstrip()
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index dbf6713..a126fd6 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -66,11 +66,6 @@
dhcp=True)
super(DeviceTaggingTest, cls).setup_credentials()
- @classmethod
- def resource_setup(cls):
- cls.set_validation_resources()
- super(DeviceTaggingTest, cls).resource_setup()
-
def verify_device_metadata(self, md_json):
md_dict = json.loads(md_json)
for d in md_dict['devices']:
@@ -139,9 +134,12 @@
# Create server
admin_pass = data_utils.rand_password()
config_drive_enabled = CONF.compute_feature_enabled.config_drive
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
server = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
config_drive=config_drive_enabled,
adminPass=admin_pass,
name=data_utils.rand_name('device-tagging-server'),
@@ -208,10 +206,10 @@
self.addCleanup(self.delete_server, server['id'])
self.ssh_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
CONF.validation.image_ssh_user,
admin_pass,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index b5fc39c..4cfc665 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -44,8 +44,13 @@
self.server_id, 'ACTIVE')
except lib_exc.NotFound:
# The server was deleted by previous test, create a new one
+ # Use class level validation resources to avoid them being
+ # deleted once a test is over
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
server = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE')
self.__class__.server_id = server['id']
except Exception:
@@ -69,8 +74,6 @@
@classmethod
def resource_setup(cls):
- cls.set_validation_resources()
-
super(ServerActionsTestJSON, cls).resource_setup()
cls.server_id = cls.rebuild_server(None, validatable=True)
@@ -80,8 +83,11 @@
def test_change_server_password(self):
# Since this test messes with the password and makes the
# server unreachable, it should create its own server
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
newserver = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE')
# The server's password should be set to the provided password
new_password = 'Newpass1234'
@@ -92,7 +98,7 @@
# Verify that the user can authenticate with the new password
server = self.client.show_server(newserver['id'])['server']
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user,
new_password,
server=server,
@@ -101,13 +107,15 @@
def _test_reboot_server(self, reboot_type):
if CONF.validation.run_validation:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
# Get the time the server was last rebooted,
server = self.client.show_server(self.server_id)['server']
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user,
self.password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
boot_time = linux_client.get_boot_time()
@@ -122,10 +130,10 @@
if CONF.validation.run_validation:
# Log in and verify the boot time has changed
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user,
self.password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
new_boot_time = linux_client.get_boot_time()
@@ -201,6 +209,8 @@
self.assertEqual(original_addresses, server['addresses'])
if CONF.validation.run_validation:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
# Authentication is attempted in the following order of priority:
# 1.The key passed in, if one was passed in.
# 2.Any key we can find through an SSH agent (if allowed).
@@ -208,10 +218,10 @@
# ~/.ssh/ (if allowed).
# 4.Plain username/password auth, if a password was given.
linux_client = remote_client.RemoteClient(
- self.get_server_ip(rebuilt_server),
+ self.get_server_ip(rebuilt_server, validation_resources),
self.ssh_user,
password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=rebuilt_server,
servers_client=self.client)
linux_client.validate_authentication()
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 90b9da4..2f0f5ee 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -20,6 +20,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -34,11 +35,6 @@
super(ServerPersonalityTestJSON, cls).setup_credentials()
@classmethod
- def resource_setup(cls):
- cls.set_validation_resources()
- super(ServerPersonalityTestJSON, cls).resource_setup()
-
- @classmethod
def skip_checks(cls):
super(ServerPersonalityTestJSON, cls).skip_checks()
if not CONF.compute_feature_enabled.personality:
@@ -57,16 +53,23 @@
personality = [{'path': file_path,
'contents': base64.encode_as_text(file_contents)}]
password = data_utils.rand_password()
- created_server = self.create_test_server(personality=personality,
- adminPass=password,
- wait_until='ACTIVE',
- validatable=True)
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ created_server = self.create_test_server(
+ personality=personality, adminPass=password, wait_until='ACTIVE',
+ validatable=True,
+ validation_resources=validation_resources)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, created_server['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server,
+ created_server['id'])
server = self.client.show_server(created_server['id'])['server']
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user, password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
self.assertEqual(file_contents,
@@ -75,8 +78,16 @@
@decorators.idempotent_id('128966d8-71fc-443c-8cab-08e24114ecc9')
def test_rebuild_server_with_personality(self):
- server = self.create_test_server(wait_until='ACTIVE', validatable=True)
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ server = self.create_test_server(
+ wait_until='ACTIVE', validatable=True,
+ validation_resources=validation_resources)
server_id = server['id']
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server, server_id)
file_contents = 'Test server rebuild.'
personality = [{'path': 'rebuild.txt',
'contents': base64.encode_as_text(file_contents)}]
@@ -126,16 +137,22 @@
'contents': base64.encode_as_text(file_contents + str(i)),
})
password = data_utils.rand_password()
- created_server = self.create_test_server(personality=person,
- adminPass=password,
- wait_until='ACTIVE',
- validatable=True)
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ created_server = self.create_test_server(
+ personality=person, adminPass=password, wait_until='ACTIVE',
+ validatable=True, validation_resources=validation_resources)
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, created_server['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.servers_client.delete_server,
+ created_server['id'])
server = self.client.show_server(created_server['id'])['server']
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.ssh_user, password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.client)
for i in person:
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index e0fed58..9bef80f 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -40,35 +40,37 @@
@classmethod
def resource_setup(cls):
- cls.set_validation_resources()
super(AttachVolumeTestJSON, cls).resource_setup()
cls.device = CONF.compute.volume_device_name
def _create_server(self):
# Start a server and wait for it to become ready
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
server = self.create_test_server(
validatable=True,
+ validation_resources=validation_resources,
wait_until='ACTIVE',
adminPass=self.image_ssh_password)
self.addCleanup(self.delete_server, server['id'])
# Record addresses so that we can ssh later
server['addresses'] = self.servers_client.list_addresses(
server['id'])['addresses']
- return server
+ return server, validation_resources
@decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
# the volume remains attached.
- server = self._create_server()
+ server, validation_resources = self._create_server()
# NOTE(andreaf) Create one remote client used throughout the test.
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.image_ssh_user,
self.image_ssh_password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
# NOTE(andreaf) We need to ensure the ssh key has been
@@ -111,7 +113,7 @@
@decorators.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
def test_list_get_volume_attachments(self):
# List volume attachment of the server
- server = self._create_server()
+ server, _ = self._create_server()
volume_1st = self.create_volume()
attachment_1st = self.attach_volume(server, volume_1st,
device=('/dev/%s' % self.device))
@@ -163,15 +165,15 @@
if not CONF.compute_feature_enabled.shelve:
raise cls.skipException('Shelve is not available.')
- def _count_volumes(self, server):
+ def _count_volumes(self, server, validation_resources):
# Count number of volumes on an instance
volumes = 0
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.image_ssh_user,
self.image_ssh_password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
@@ -179,7 +181,7 @@
volumes = int(linux_client.exec_command(command).strip())
return volumes
- def _shelve_server(self, server):
+ def _shelve_server(self, server, validation_resources):
# NOTE(andreaf) If we are going to shelve a server, we should
# check first whether the server is ssh-able. Otherwise we
# won't be able to distinguish failures introduced by shelve
@@ -188,10 +190,10 @@
# avoid breaking the VM
if CONF.validation.run_validation:
linux_client = remote_client.RemoteClient(
- self.get_server_ip(server),
+ self.get_server_ip(server, validation_resources),
self.image_ssh_user,
self.image_ssh_password,
- self.validation_resources['keypair']['private_key'],
+ validation_resources['keypair']['private_key'],
server=server,
servers_client=self.servers_client)
linux_client.validate_authentication()
@@ -199,30 +201,34 @@
# If validation went ok, or it was skipped, shelve the server
compute.shelve_server(self.servers_client, server['id'])
- def _unshelve_server_and_check_volumes(self, server, number_of_volumes):
+ def _unshelve_server_and_check_volumes(self, server,
+ validation_resources,
+ number_of_volumes):
# Unshelve the instance and check that there are expected volumes
self.servers_client.unshelve_server(server['id'])
waiters.wait_for_server_status(self.servers_client,
server['id'],
'ACTIVE')
if CONF.validation.run_validation:
- counted_volumes = self._count_volumes(server)
+ counted_volumes = self._count_volumes(
+ server, validation_resources)
self.assertEqual(number_of_volumes, counted_volumes)
@decorators.idempotent_id('13a940b6-3474-4c3c-b03f-29b89112bfee')
def test_attach_volume_shelved_or_offload_server(self):
# Create server, count number of volumes on it, shelve
# server and attach pre-created volume to shelved server
- server = self._create_server()
+ server, validation_resources = self._create_server()
volume = self.create_volume()
- num_vol = self._count_volumes(server)
- self._shelve_server(server)
+ num_vol = self._count_volumes(server, validation_resources)
+ self._shelve_server(server, validation_resources)
attachment = self.attach_volume(server, volume,
device=('/dev/%s' % self.device),
check_reserved=True)
# Unshelve the instance and check that attached volume exists
- self._unshelve_server_and_check_volumes(server, num_vol + 1)
+ self._unshelve_server_and_check_volumes(
+ server, validation_resources, num_vol + 1)
# Get volume attachment of the server
volume_attachment = self.servers_client.show_volume_attachment(
@@ -238,10 +244,10 @@
def test_detach_volume_shelved_or_offload_server(self):
# Count number of volumes on instance, shelve
# server and attach pre-created volume to shelved server
- server = self._create_server()
+ server, validation_resources = self._create_server()
volume = self.create_volume()
- num_vol = self._count_volumes(server)
- self._shelve_server(server)
+ num_vol = self._count_volumes(server, validation_resources)
+ self._shelve_server(server, validation_resources)
# Attach and then detach the volume
self.attach_volume(server, volume, device=('/dev/%s' % self.device),
@@ -252,4 +258,5 @@
# Unshelve the instance and check that we have the expected number of
# volume(s)
- self._unshelve_server_and_check_volumes(server, num_vol)
+ self._unshelve_server_and_check_volumes(
+ server, validation_resources, num_vol)
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 5c3cd26..6343ea8 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -161,16 +161,14 @@
manager_project_id]
# Get available project scopes
- available_projects =\
- self.client.list_auth_projects()['projects']
+ available_projects = self.client.list_auth_projects()['projects']
# create list to save fetched project's id
fetched_project_ids = [i['id'] for i in available_projects]
# verifying the project ids in list
missing_project_ids = \
- [p for p in assigned_project_ids
- if p not in fetched_project_ids]
+ [p for p in assigned_project_ids if p not in fetched_project_ids]
self.assertEmpty(missing_project_ids,
"Failed to find project_id %s in fetched list" %
', '.join(missing_project_ids))
diff --git a/tempest/api/identity/v3/test_catalog.py b/tempest/api/identity/v3/test_catalog.py
old mode 100755
new mode 100644
diff --git a/tempest/api/identity/v3/test_projects.py b/tempest/api/identity/v3/test_projects.py
index 0ae35ea..bbb4013 100644
--- a/tempest/api/identity/v3/test_projects.py
+++ b/tempest/api/identity/v3/test_projects.py
@@ -24,8 +24,7 @@
@decorators.idempotent_id('86128d46-e170-4644-866a-cc487f699e1d')
def test_list_projects_returns_only_authorized_projects(self):
- alt_project_name =\
- self.os_alt.credentials.project_name
+ alt_project_name = self.os_alt.credentials.project_name
resp = self.non_admin_users_client.list_user_projects(
self.os_primary.credentials.user_id)
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 6bec0d7..8308e34 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -96,6 +96,12 @@
cls.metering_labels = []
cls.metering_label_rules = []
cls.ethertype = "IPv" + str(cls._ip_version)
+ if cls._ip_version == 4:
+ cls.cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+ cls.mask_bits = CONF.network.project_network_mask_bits
+ elif cls._ip_version == 6:
+ cls.cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
+ cls.mask_bits = CONF.network.project_network_v6_mask_bits
@classmethod
def resource_cleanup(cls):
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index a471bd6..3075047 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import netaddr
import six
from tempest.api.network import base
@@ -41,8 +40,6 @@
api_extensions
"""
- _project_network_cidr = CONF.network.project_network_cidr
-
@classmethod
def skip_checks(cls):
super(AllowedAddressPairTestJSON, cls).skip_checks()
@@ -105,8 +102,7 @@
@decorators.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
def test_update_port_with_cidr_address_pair(self):
# Update allowed address pair with cidr
- cidr = str(netaddr.IPNetwork(self._project_network_cidr))
- self._update_port_with_address(cidr)
+ self._update_port_with_address(str(self.cidr))
@decorators.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
def test_update_port_with_multiple_ip_mac_address_pair(self):
@@ -135,4 +131,3 @@
class AllowedAddressPairIpV6TestJSON(AllowedAddressPairTestJSON):
_ip_version = 6
- _project_network_cidr = CONF.network.project_network_v6_cidr
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 88340c1..1c59556 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -34,8 +34,7 @@
def resource_setup(cls):
super(BaseNetworkTestResources, cls).resource_setup()
cls.network = cls.create_network()
- cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
- cls._ip_version)
+ cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network)
cls._subnet_data = {6: {'gateway':
str(cls._get_gateway_from_tempest_conf(6)),
'allocation_pools':
@@ -64,20 +63,13 @@
'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
@classmethod
- def _create_subnet_with_last_subnet_block(cls, network, ip_version):
+ def _create_subnet_with_last_subnet_block(cls, network):
# Derive last subnet CIDR block from project CIDR and
# create the subnet with that derived CIDR
- if ip_version == 4:
- cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- mask_bits = CONF.network.project_network_mask_bits
- elif ip_version == 6:
- cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
- mask_bits = CONF.network.project_network_v6_mask_bits
-
- subnet_cidr = list(cidr.subnet(mask_bits))[-1]
+ subnet_cidr = list(cls.cidr.subnet(cls.mask_bits))[-1]
gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
return cls.create_subnet(network, gateway=gateway_ip,
- cidr=subnet_cidr, mask_bits=mask_bits)
+ cidr=subnet_cidr, mask_bits=cls.mask_bits)
@classmethod
def _get_gateway_from_tempest_conf(cls, ip_version):
@@ -487,14 +479,8 @@
def test_bulk_create_delete_subnet(self):
networks = [self.create_network(), self.create_network()]
# Creates 2 subnets in one request
- if self._ip_version == 4:
- cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- mask_bits = CONF.network.project_network_mask_bits
- else:
- cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
- mask_bits = CONF.network.project_network_v6_mask_bits
-
- cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
+ cidrs = [subnet_cidr
+ for subnet_cidr in self.cidr.subnet(self.mask_bits)]
names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
subnets_list = []
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 5c36747..eb53fbb 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -84,25 +84,13 @@
self.assertTrue(port1['admin_state_up'])
self.assertTrue(port2['admin_state_up'])
- @classmethod
- def _get_ipaddress_from_tempest_conf(cls):
- """Return subnet with mask bits for configured CIDR """
- if cls._ip_version == 4:
- cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- cidr.prefixlen = CONF.network.project_network_mask_bits
-
- elif cls._ip_version == 6:
- cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
- cidr.prefixlen = CONF.network.project_network_v6_mask_bits
-
- return cidr
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
def test_create_port_in_allowed_allocation_pools(self):
network = self.create_network()
net_id = network['id']
- address = self._get_ipaddress_from_tempest_conf()
+ address = self.cidr
+ address.prefixlen = self.mask_bits
if ((address.version == 4 and address.prefixlen >= 30) or
(address.version == 6 and address.prefixlen >= 126)):
msg = ("Subnet %s isn't large enough for the test" % address.cidr)
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 3883cc2..99ffaa8 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -59,13 +59,6 @@
msg = "router extension not enabled."
raise cls.skipException(msg)
- @classmethod
- def resource_setup(cls):
- super(RoutersTest, cls).resource_setup()
- cls.tenant_cidr = (CONF.network.project_network_cidr
- if cls._ip_version == 4 else
- CONF.network.project_network_v6_cidr)
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('f64403e2-8483-4b34-8ccd-b09a87bcc68c')
@testtools.skipUnless(CONF.network.public_network_id,
@@ -139,33 +132,6 @@
self.assertEqual(show_port_body['port']['device_id'],
router['id'])
- def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
- show_body = self.admin_routers_client.show_router(router_id)
- actual_ext_gw_info = show_body['router']['external_gateway_info']
- if exp_ext_gw_info is None:
- self.assertIsNone(actual_ext_gw_info)
- return
- # Verify only keys passed in exp_ext_gw_info
- for k, v in exp_ext_gw_info.items():
- self.assertEqual(v, actual_ext_gw_info[k])
-
- def _verify_gateway_port(self, router_id):
- list_body = self.admin_ports_client.list_ports(
- network_id=CONF.network.public_network_id,
- device_id=router_id)
- self.assertEqual(len(list_body['ports']), 1)
- gw_port = list_body['ports'][0]
- fixed_ips = gw_port['fixed_ips']
- self.assertNotEmpty(fixed_ips)
- # Assert that all of the IPs from the router gateway port
- # are allocated from a valid public subnet.
- public_net_body = self.admin_networks_client.show_network(
- CONF.network.public_network_id)
- public_subnet_ids = public_net_body['network']['subnets']
- for fixed_ip in fixed_ips:
- subnet_id = fixed_ip['subnet_id']
- self.assertIn(subnet_id, public_subnet_ids)
-
@decorators.idempotent_id('cbe42f84-04c2-11e7-8adb-fa163e4fa634')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
@@ -202,7 +168,7 @@
def test_update_delete_extra_route(self):
# Create different cidr for each subnet to avoid cidr duplicate
# The cidr starts from project_cidr
- next_cidr = netaddr.IPNetwork(self.tenant_cidr)
+ next_cidr = self.cidr
# Prepare to build several routes
test_routes = []
routes_num = 4
@@ -278,7 +244,7 @@
network02 = self.create_network(
network_name=data_utils.rand_name('router-network02-'))
subnet01 = self.create_subnet(network01)
- sub02_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
+ sub02_cidr = self.cidr.next()
subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
router = self._create_router()
interface01 = self._add_router_interface_with_subnet_id(router['id'],
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index 60b9de7..c9ce55c 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import netaddr
-
from tempest.api.network import base
from tempest.common import utils
from tempest import config
@@ -40,9 +38,6 @@
cls.router = cls.create_router()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
- cls.tenant_cidr = (CONF.network.project_network_cidr
- if cls._ip_version == 4 else
- CONF.network.project_network_v6_cidr)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
@@ -57,7 +52,7 @@
@decorators.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
def test_router_add_gateway_net_not_external_returns_400(self):
alt_network = self.create_network()
- sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
+ sub_cidr = self.cidr.next()
self.create_subnet(alt_network, cidr=sub_cidr)
self.assertRaises(lib_exc.BadRequest,
self.routers_client.update_router,
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
old mode 100755
new mode 100644
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 4cad52a..ae29049 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -22,15 +22,6 @@
class VolumeTypesNegativeTest(base.BaseVolumeAdminTest):
@decorators.attr(type=['negative'])
- @decorators.idempotent_id('b48c98f2-e662-4885-9b71-032256906314')
- def test_create_with_nonexistent_volume_type(self):
- # Should not be able to create volume with nonexistent volume_type.
- params = {'name': data_utils.rand_uuid(),
- 'volume_type': data_utils.rand_uuid()}
- self.assertRaises(lib_exc.NotFound,
- self.volumes_client.create_volume, **params)
-
- @decorators.attr(type=['negative'])
@decorators.idempotent_id('878b4e57-faa2-4659-b0d1-ce740a06ae81')
def test_create_with_empty_name(self):
# Should not be able to create volume type with an empty name.
diff --git a/tempest/api/volume/admin/test_volumes_list.py b/tempest/api/volume/admin/test_volumes_list.py
index 9d98b7a..6ce4a85 100644
--- a/tempest/api/volume/admin/test_volumes_list.py
+++ b/tempest/api/volume/admin/test_volumes_list.py
@@ -45,9 +45,9 @@
# Create a volume in admin tenant
adm_vol = self.admin_volume_client.create_volume(
size=CONF.volume.volume_size)['volume']
+ self.addCleanup(self.admin_volume_client.delete_volume, adm_vol['id'])
waiters.wait_for_volume_resource_status(self.admin_volume_client,
adm_vol['id'], 'available')
- self.addCleanup(self.admin_volume_client.delete_volume, adm_vol['id'])
params = {'all_tenants': 1,
'project_id': self.volumes_client.tenant_id}
# Getting volume list from primary tenant using admin credentials
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 7d745f2..f139283 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -35,7 +35,6 @@
# Create a test shared instance and volume for attach/detach tests
cls.volume = cls.create_volume()
- cls.mountpoint = "/dev/vdc"
def create_image(self):
# Create image
@@ -176,7 +175,7 @@
self.volumes_client.attach_volume,
data_utils.rand_uuid(),
instance_uuid=server['id'],
- mountpoint=self.mountpoint)
+ mountpoint="/dev/vdc")
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index df0f5a5..86fe3f5 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -128,6 +128,8 @@
"this stage.")
raise ValueError(msg)
+ LOG.debug("Provisioning test server with validation resources %s",
+ validation_resources)
if 'security_groups' in kwargs:
kwargs['security_groups'].append(
{'name': validation_resources['security_group']['name']})
diff --git a/tempest/common/validation_resources.py b/tempest/common/validation_resources.py
deleted file mode 100644
index ae9d584..0000000
--- a/tempest/common/validation_resources.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_log import log as logging
-
-from tempest.lib.common.utils import data_utils
-from tempest.lib import exceptions as lib_exc
-
-LOG = logging.getLogger(__name__)
-
-
-def _network_service(clients, use_neutron):
- # Internal helper to select the right network clients
- if use_neutron:
- return clients.network
- else:
- return clients.compute
-
-
-def create_ssh_security_group(clients, add_rule=False, ethertype='IPv4',
- use_neutron=True):
- """Create a security group for ping/ssh testing
-
- Create a security group to be attached to a VM using the nova or neutron
- clients. If rules are added, the group can be attached to a VM to enable
- connectivity validation over ICMP and further testing over SSH.
-
- :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
- or of a subclass of it. Resources are provisioned using clients from
- `clients`.
- :param add_rule: Whether security group rules are provisioned or not.
- Defaults to `False`.
- :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
- :param use_neutron: When True resources are provisioned via neutron, when
- False resources are provisioned via nova.
- :returns: A dictionary with the security group as returned by the API.
-
- Examples::
-
- from tempest.common import validation_resources as vr
- from tempest.lib import auth
- from tempest.lib.services import clients
-
- creds = auth.get_credentials('http://mycloud/identity/v3',
- username='me', project_name='me',
- password='secret', domain_name='Default')
- osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
- # Security group for IPv4 tests
- sg4 = vr.create_ssh_security_group(osclients, add_rule=True)
- # Security group for IPv6 tests
- sg6 = vr.create_ssh_security_group(osclients, ethertype='IPv6',
- add_rule=True)
- """
- network_service = _network_service(clients, use_neutron)
- security_groups_client = network_service.SecurityGroupsClient()
- security_group_rules_client = network_service.SecurityGroupRulesClient()
- # Security Group clients for nova and neutron behave the same
- sg_name = data_utils.rand_name('securitygroup-')
- sg_description = data_utils.rand_name('description-')
- security_group = security_groups_client.create_security_group(
- name=sg_name, description=sg_description)['security_group']
- # Security Group Rules clients require different parameters depending on
- # the network service in use
- if add_rule:
- if use_neutron:
- security_group_rules_client.create_security_group_rule(
- security_group_id=security_group['id'],
- protocol='tcp',
- ethertype=ethertype,
- port_range_min=22,
- port_range_max=22,
- direction='ingress')
- security_group_rules_client.create_security_group_rule(
- security_group_id=security_group['id'],
- protocol='icmp',
- ethertype=ethertype,
- direction='ingress')
- else:
- security_group_rules_client.create_security_group_rule(
- parent_group_id=security_group['id'], ip_protocol='tcp',
- from_port=22, to_port=22)
- security_group_rules_client.create_security_group_rule(
- parent_group_id=security_group['id'], ip_protocol='icmp',
- from_port=-1, to_port=-1)
- LOG.debug("SSH Validation resource security group with tcp and icmp "
- "rules %s created", sg_name)
- return security_group
-
-
-def create_validation_resources(clients, keypair=False, floating_ip=False,
- security_group=False,
- security_group_rules=False,
- ethertype='IPv4', use_neutron=True,
- floating_network_id=None,
- floating_network_name=None):
- """Provision resources for VM ping/ssh testing
-
- Create resources required to be able to ping / ssh a virtual machine:
- keypair, security group, security group rules and a floating IP.
- Which of those resources are required may depend on the cloud setup and on
- the specific test and it can be controlled via the corresponding
- arguments.
-
- Provisioned resources are returned in a dictionary.
-
- :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
- or of a subclass of it. Resources are provisioned using clients from
- `clients`.
- :param keypair: Whether to provision a keypair. Defaults to False.
- :param floating_ip: Whether to provision a floating IP. Defaults to False.
- :param security_group: Whether to provision a security group. Defaults to
- False.
- :param security_group_rules: Whether to provision security group rules.
- Defaults to False.
- :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
- :param use_neutron: When True resources are provisioned via neutron, when
- False resources are provisioned via nova.
- :param floating_network_id: The id of the network used to provision a
- floating IP. Only used if a floating IP is requested and with neutron.
- :param floating_network_name: The name of the floating IP pool used to
- provision the floating IP. Only used if a floating IP is requested and
- with nova-net.
- :returns: A dictionary with the same keys as the input
- `validation_resources` and the resources for values in the format
- they are returned by the API.
-
- Examples::
-
- from tempest.common import validation_resources as vr
- from tempest.lib import auth
- from tempest.lib.services import clients
-
- creds = auth.get_credentials('http://mycloud/identity/v3',
- username='me', project_name='me',
- password='secret', domain_name='Default')
- osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
- # Request keypair and floating IP
- resources = dict(keypair=True, security_group=False,
- security_group_rules=False, floating_ip=True)
- resources = vr.create_validation_resources(
- osclients, use_neutron=True,
- floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
- **resources)
-
- # The floating IP to be attached to the VM
- floating_ip = resources['floating_ip']['ip']
- """
- # Create and Return the validation resources required to validate a VM
- validation_data = {}
- if keypair:
- keypair_name = data_utils.rand_name('keypair')
- validation_data.update(
- clients.compute.KeyPairsClient().create_keypair(
- name=keypair_name))
- LOG.debug("Validation resource key %s created", keypair_name)
- if security_group:
- validation_data['security_group'] = create_ssh_security_group(
- clients, add_rule=security_group_rules,
- use_neutron=use_neutron, ethertype=ethertype)
- if floating_ip:
- floating_ip_client = _network_service(
- clients, use_neutron).FloatingIPsClient()
- if use_neutron:
- floatingip = floating_ip_client.create_floatingip(
- floating_network_id=floating_network_id)
- # validation_resources['floating_ip'] has historically looked
- # like a compute API POST /os-floating-ips response, so we need
- # to mangle it a bit for a Neutron response with different
- # fields.
- validation_data['floating_ip'] = floatingip['floatingip']
- validation_data['floating_ip']['ip'] = (
- floatingip['floatingip']['floating_ip_address'])
- else:
- # NOTE(mriedem): The os-floating-ips compute API was deprecated
- # in the 2.36 microversion. Any tests for CRUD operations on
- # floating IPs using the compute API should be capped at 2.35.
- validation_data.update(floating_ip_client.create_floating_ip(
- pool=floating_network_name))
- return validation_data
-
-
-def clear_validation_resources(clients, keypair=None, floating_ip=None,
- security_group=None, use_neutron=True):
- """Cleanup resources for VM ping/ssh testing
-
- Cleanup a set of resources provisioned via `create_validation_resources`.
- In case of errors during cleanup, the exception is logged and the cleanup
- process is continued. The first exception that was raised is re-raised
- after the cleanup is complete.
-
- :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
- or of a subclass of it. Resources are provisioned using clients from
- `clients`.
- :param keypair: A dictionary with the keypair to be deleted. Defaults to
- None.
- :param floating_ip: A dictionary with the floating_ip to be deleted.
- Defaults to None.
- :param security_group: A dictionary with the security_group to be deleted.
- Defaults to None.
- :param use_neutron: When True resources are provisioned via neutron, when
- False resources are provisioned via nova.
- :returns: A dictionary with the same keys as the input
- `validation_resources` and the resources for values in the format
- they are returned by the API.
-
- Examples::
-
- from tempest.common import validation_resources as vr
- from tempest.lib import auth
- from tempest.lib.services import clients
-
- creds = auth.get_credentials('http://mycloud/identity/v3',
- username='me', project_name='me',
- password='secret', domain_name='Default')
- osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
- # Request keypair and floating IP
- resources = dict(keypair=True, security_group=False,
- security_group_rules=False, floating_ip=True)
- resources = vr.create_validation_resources(
- osclients, validation_resources=resources, use_neutron=True,
- floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C')
-
- # Now cleanup the resources
- try:
- vr.clear_validation_resources(osclients, use_neutron=True,
- **resources)
- except Exception as e:
- LOG.exception('Something went wrong during cleanup, ignoring')
- """
- has_exception = None
- if keypair:
- keypair_client = clients.compute.KeyPairsClient()
- keypair_name = keypair['name']
- try:
- keypair_client.delete_keypair(keypair_name)
- except lib_exc.NotFound:
- LOG.warning(
- "Keypair %s is not found when attempting to delete",
- keypair_name
- )
- except Exception as exc:
- LOG.exception('Exception raised while deleting key %s',
- keypair_name)
- if not has_exception:
- has_exception = exc
- network_service = _network_service(clients, use_neutron)
- if security_group:
- security_group_client = network_service.SecurityGroupsClient()
- sec_id = security_group['id']
- try:
- security_group_client.delete_security_group(sec_id)
- security_group_client.wait_for_resource_deletion(sec_id)
- except lib_exc.NotFound:
- LOG.warning("Security group %s is not found when attempting "
- "to delete", sec_id)
- except lib_exc.Conflict as exc:
- LOG.exception('Conflict while deleting security '
- 'group %s VM might not be deleted', sec_id)
- if not has_exception:
- has_exception = exc
- except Exception as exc:
- LOG.exception('Exception raised while deleting security '
- 'group %s', sec_id)
- if not has_exception:
- has_exception = exc
- if floating_ip:
- floating_ip_client = network_service.FloatingIPsClient()
- fip_id = floating_ip['id']
- try:
- if use_neutron:
- floating_ip_client.delete_floatingip(fip_id)
- else:
- floating_ip_client.delete_floating_ip(fip_id)
- except lib_exc.NotFound:
- LOG.warning('Floating ip %s not found while attempting to '
- 'delete', fip_id)
- except Exception as exc:
- LOG.exception('Exception raised while deleting ip %s', fip_id)
- if not has_exception:
- has_exception = exc
- if has_exception:
- raise has_exception
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 9a6c8f5..4f1a883 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -28,6 +28,43 @@
class DynamicCredentialProvider(cred_provider.CredentialProvider):
+ """Creates credentials dynamically for tests
+
+ A credential provider that, based on an initial set of
+ admin credentials, creates new credentials on the fly for
+ tests to use and then discard.
+
+ :param str identity_version: identity API version to use `v2` or `v3`
+ :param str admin_role: name of the admin role added to admin users
+ :param str name: names of dynamic resources include this parameter
+ when specified
+ :param str credentials_domain: name of the domain where the users
+ are created. If not defined, the project
+ domain from admin_credentials is used
+ :param dict network_resources: network resources to be created for
+ the created credentials
+ :param Credentials admin_creds: initial admin credentials
+ :param bool identity_admin_domain_scope: Set to true if admin should be
+ scoped to the domain. By
+ default this is False and the
+ admin role is scoped to the
+ project.
+ :param str identity_admin_role: The role name to use for admin
+ :param list extra_roles: A list of strings for extra roles that should
+ be assigned to all created users
+ :param bool neutron_available: Whether we are running in an environment
+ with neutron
+ :param bool create_networks: Whether dynamic project networks should be
+ created or not
+ :param project_network_cidr: The CIDR to use for created project
+ networks
+ :param project_network_mask_bits: The network mask bits to use for
+ created project networks
+ :param public_network_id: The id for the public network to use
+ :param identity_admin_endpoint_type: The endpoint type for identity
+ admin clients. Defaults to public.
+ :param identity_uri: Identity URI of the target cloud
+ """
def __init__(self, identity_version, name=None, network_resources=None,
credentials_domain=None, admin_role=None, admin_creds=None,
@@ -37,43 +74,6 @@
project_network_cidr=None, project_network_mask_bits=None,
public_network_id=None, resource_prefix=None,
identity_admin_endpoint_type='public', identity_uri=None):
- """Creates credentials dynamically for tests
-
- A credential provider that, based on an initial set of
- admin credentials, creates new credentials on the fly for
- tests to use and then discard.
-
- :param str identity_version: identity API version to use `v2` or `v3`
- :param str admin_role: name of the admin role added to admin users
- :param str name: names of dynamic resources include this parameter
- when specified
- :param str credentials_domain: name of the domain where the users
- are created. If not defined, the project
- domain from admin_credentials is used
- :param dict network_resources: network resources to be created for
- the created credentials
- :param Credentials admin_creds: initial admin credentials
- :param bool identity_admin_domain_scope: Set to true if admin should be
- scoped to the domain. By
- default this is False and the
- admin role is scoped to the
- project.
- :param str identity_admin_role: The role name to use for admin
- :param list extra_roles: A list of strings for extra roles that should
- be assigned to all created users
- :param bool neutron_available: Whether we are running in an environemnt
- with neutron
- :param bool create_networks: Whether dynamic project networks should be
- created or not
- :param project_network_cidr: The CIDR to use for created project
- networks
- :param project_network_mask_bits: The network mask bits to use for
- created project networks
- :param public_network_id: The id for the public network to use
- :param identity_admin_endpoint_type: The endpoint type for identity
- admin clients. Defaults to public.
- :param identity_uri: Identity URI of the target cloud
- """
super(DynamicCredentialProvider, self).__init__(
identity_version=identity_version, identity_uri=identity_uri,
admin_role=admin_role, name=name,
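
As a rough illustration of the provider documented above (a minimal sketch,
with placeholder endpoint, user and role names, and assuming the standard
`get_primary_creds`/`clear_creds` credential provider interface)::

    from tempest.lib import auth
    from tempest.lib.common import dynamic_creds

    # Initial admin credentials; everything else is created from these
    admin_creds = auth.get_credentials('http://mycloud/identity/v3',
                                       username='admin', project_name='admin',
                                       password='secret',
                                       domain_name='Default')
    provider = dynamic_creds.DynamicCredentialProvider(
        identity_version='v3', name='MyTestClass', admin_role='admin',
        admin_creds=admin_creds, identity_uri='http://mycloud/identity/v3')
    # A fresh user/project pair is provisioned on the fly ...
    primary_creds = provider.get_primary_creds()
    # ... and discarded again once the tests are done with it
    provider.clear_creds()
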
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index cd3a10e..83db513 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -41,6 +41,35 @@
class PreProvisionedCredentialProvider(cred_provider.CredentialProvider):
+ """Credentials provider using pre-provisioned accounts
+
+ This credentials provider loads the details of pre-provisioned
+ accounts from a YAML file, in the format specified by
+ ``etc/accounts.yaml.sample``. It locks accounts while in use, using the
+ external locking mechanism, allowing for multiple python processes
+ to share a single account file, and thus running tests in parallel.
+
+ The accounts_lock_dir must be generated using `lockutils.get_lock_path`
+ from the oslo.concurrency library. For instance::
+
+ accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
+ 'test_accounts')
+
+ Role names for object storage are optional as long as the
+ `operator` and `reseller_admin` credential types are not used in the
+ accounts file.
+
+ :param identity_version: identity version of the credentials
+ :param admin_role: name of the admin role
+ :param test_accounts_file: path to the accounts YAML file
+ :param accounts_lock_dir: the directory for external locking
+ :param name: name of the hash file (optional)
+ :param credentials_domain: name of the domain credentials belong to
+ (if no domain is configured)
+ :param object_storage_operator_role: name of the role
+ :param object_storage_reseller_admin_role: name of the role
+ :param identity_uri: Identity URI of the target cloud
+ """
# Exclude from the hash fields specific to v2 or v3 identity API
# i.e. only include user*, project*, tenant* and password
@@ -51,35 +80,6 @@
accounts_lock_dir, name=None, credentials_domain=None,
admin_role=None, object_storage_operator_role=None,
object_storage_reseller_admin_role=None, identity_uri=None):
- """Credentials provider using pre-provisioned accounts
-
- This credentials provider loads the details of pre-provisioned
- accounts from a YAML file, in the format specified by
- `etc/accounts.yaml.sample`. It locks accounts while in use, using the
- external locking mechanism, allowing for multiple python processes
- to share a single account file, and thus running tests in parallel.
-
- The accounts_lock_dir must be generated using `lockutils.get_lock_path`
- from the oslo.concurrency library. For instance:
-
- accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
- 'test_accounts')
-
- Role names for object storage are optional as long as the
- `operator` and `reseller_admin` credential types are not used in the
- accounts file.
-
- :param identity_version: identity version of the credentials
- :param admin_role: name of the admin role
- :param test_accounts_file: path to the accounts YAML file
- :param accounts_lock_dir: the directory for external locking
- :param name: name of the hash file (optional)
- :param credentials_domain: name of the domain credentials belong to
- (if no domain is configured)
- :param object_storage_operator_role: name of the role
- :param object_storage_reseller_admin_role: name of the role
- :param identity_uri: Identity URI of the target cloud
- """
super(PreProvisionedCredentialProvider, self).__init__(
identity_version=identity_version, name=name,
admin_role=admin_role, credentials_domain=credentials_domain,
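
Along the same lines, a hedged sketch of constructing the pre-provisioned
provider; `CONF` and the accounts file path are placeholders supplied by the
caller, and the lock directory is derived as the class docstring requires::

    import os

    from oslo_concurrency import lockutils

    from tempest.lib.common import preprov_creds

    # CONF is the caller's oslo.config object; the lock dir must come from
    # lockutils.get_lock_path as described above
    accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
                                     'test_accounts')
    provider = preprov_creds.PreProvisionedCredentialProvider(
        identity_version='v3',
        test_accounts_file='etc/accounts.yaml',
        accounts_lock_dir=accounts_lock_dir,
        admin_role='admin',
        identity_uri='http://mycloud/identity/v3')
    # Accounts are locked while in use, so parallel workers do not clash
    creds = provider.get_primary_creds()
    provider.clear_creds()
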
diff --git a/tempest/lib/common/validation_resources.py b/tempest/lib/common/validation_resources.py
new file mode 100644
index 0000000..c35a01a
--- /dev/null
+++ b/tempest/lib/common/validation_resources.py
@@ -0,0 +1,457 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2017 IBM Corp.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
+
+LOG = logging.getLogger(__name__)
+
+
+def _network_service(clients, use_neutron):
+ # Internal helper to select the right network clients
+ if use_neutron:
+ return clients.network
+ else:
+ return clients.compute
+
+
+def create_ssh_security_group(clients, add_rule=False, ethertype='IPv4',
+ use_neutron=True):
+ """Create a security group for ping/ssh testing
+
+ Create a security group to be attached to a VM using the nova or neutron
+ clients. If rules are added, the group can be attached to a VM to enable
+ connectivity validation over ICMP and further testing over SSH.
+
+ :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+ or of a subclass of it. Resources are provisioned using clients from
+ `clients`.
+ :param add_rule: Whether security group rules are provisioned or not.
+ Defaults to `False`.
+ :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
+ :param use_neutron: When True resources are provisioned via neutron, when
+ False resources are provisioned via nova.
+ :returns: A dictionary with the security group as returned by the API.
+
+ Examples::
+
+ from tempest.lib.common import validation_resources as vr
+ from tempest.lib import auth
+ from tempest.lib.services import clients
+
+ creds = auth.get_credentials('http://mycloud/identity/v3',
+ username='me', project_name='me',
+ password='secret', domain_name='Default')
+ osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+ # Security group for IPv4 tests
+ sg4 = vr.create_ssh_security_group(osclients, add_rule=True)
+ # Security group for IPv6 tests
+ sg6 = vr.create_ssh_security_group(osclients, ethertype='IPv6',
+ add_rule=True)
+ """
+ network_service = _network_service(clients, use_neutron)
+ security_groups_client = network_service.SecurityGroupsClient()
+ security_group_rules_client = network_service.SecurityGroupRulesClient()
+ # Security Group clients for nova and neutron behave the same
+ sg_name = data_utils.rand_name('securitygroup-')
+ sg_description = data_utils.rand_name('description-')
+ security_group = security_groups_client.create_security_group(
+ name=sg_name, description=sg_description)['security_group']
+ # Security Group Rules clients require different parameters depending on
+ # the network service in use
+ if add_rule:
+ try:
+ if use_neutron:
+ security_group_rules_client.create_security_group_rule(
+ security_group_id=security_group['id'],
+ protocol='tcp',
+ ethertype=ethertype,
+ port_range_min=22,
+ port_range_max=22,
+ direction='ingress')
+ security_group_rules_client.create_security_group_rule(
+ security_group_id=security_group['id'],
+ protocol='icmp',
+ ethertype=ethertype,
+ direction='ingress')
+ else:
+ security_group_rules_client.create_security_group_rule(
+ parent_group_id=security_group['id'], ip_protocol='tcp',
+ from_port=22, to_port=22)
+ security_group_rules_client.create_security_group_rule(
+ parent_group_id=security_group['id'], ip_protocol='icmp',
+ from_port=-1, to_port=-1)
+ except Exception as sgc_exc:
+ # If adding security group rules fails, we cleanup the SG before
+ # re-raising the failure up
+ with excutils.save_and_reraise_exception():
+ try:
+ msg = ('Error while provisioning security group rules in '
+ 'security group %s. Trying to cleanup.')
+ # The exceptions logging is already handled, so using
+ # debug here just to provide more context
+ LOG.debug(msg, sgc_exc)
+ clear_validation_resources(
+ clients, keypair=None, floating_ip=None,
+ security_group=security_group,
+ use_neutron=use_neutron)
+ except Exception as cleanup_exc:
+ msg = ('Error during cleanup of a security group. '
+ 'The cleanup was triggered by an exception during '
+ 'the provisioning of security group rules.\n'
+ 'Provisioning exception: %s\n'
+ 'First cleanup exception: %s')
+ LOG.exception(msg, sgc_exc, cleanup_exc)
+ LOG.debug("SSH Validation resource security group with tcp and icmp "
+ "rules %s created", sg_name)
+ return security_group
+
+
+def create_validation_resources(clients, keypair=False, floating_ip=False,
+ security_group=False,
+ security_group_rules=False,
+ ethertype='IPv4', use_neutron=True,
+ floating_network_id=None,
+ floating_network_name=None):
+ """Provision resources for VM ping/ssh testing
+
+ Create resources required to be able to ping / ssh a virtual machine:
+ keypair, security group, security group rules and a floating IP.
+ Which of those resources are required may depend on the cloud setup and on
+ the specific test and it can be controlled via the corresponding
+ arguments.
+
+ Provisioned resources are returned in a dictionary.
+
+ :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+ or of a subclass of it. Resources are provisioned using clients from
+ `clients`.
+ :param keypair: Whether to provision a keypair. Defaults to False.
+ :param floating_ip: Whether to provision a floating IP. Defaults to False.
+ :param security_group: Whether to provision a security group. Defaults to
+ False.
+ :param security_group_rules: Whether to provision security group rules.
+ Defaults to False.
+ :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
+ :param use_neutron: When True resources are provisioned via neutron, when
+ False resources are provisioned via nova.
+ :param floating_network_id: The id of the network used to provision a
+ floating IP. Only used if a floating IP is requested and with neutron.
+ :param floating_network_name: The name of the floating IP pool used to
+ provision the floating IP. Only used if a floating IP is requested and
+ with nova-net.
+ :returns: A dictionary with the resources in the format they are returned
+ by the API. Valid keys are 'keypair', 'floating_ip' and
+ 'security_group'.
+
+ Examples::
+
+ from tempest.lib.common import validation_resources as vr
+ from tempest.lib import auth
+ from tempest.lib.services import clients
+
+ creds = auth.get_credentials('http://mycloud/identity/v3',
+ username='me', project_name='me',
+ password='secret', domain_name='Default')
+ osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+ # Request keypair and floating IP
+ resources = dict(keypair=True, security_group=False,
+ security_group_rules=False, floating_ip=True)
+ resources = vr.create_validation_resources(
+ osclients, use_neutron=True,
+ floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
+ **resources)
+
+ # The floating IP to be attached to the VM
+ floating_ip = resources['floating_ip']['ip']
+ """
+ # Create and Return the validation resources required to validate a VM
+ msg = ('Requested validation resources keypair %s, floating IP %s, '
+ 'security group %s')
+ LOG.debug(msg, keypair, floating_ip, security_group)
+ validation_data = {}
+ try:
+ if keypair:
+ keypair_name = data_utils.rand_name('keypair')
+ validation_data.update(
+ clients.compute.KeyPairsClient().create_keypair(
+ name=keypair_name))
+ LOG.debug("Validation resource key %s created", keypair_name)
+ if security_group:
+ validation_data['security_group'] = create_ssh_security_group(
+ clients, add_rule=security_group_rules,
+ use_neutron=use_neutron, ethertype=ethertype)
+ if floating_ip:
+ floating_ip_client = _network_service(
+ clients, use_neutron).FloatingIPsClient()
+ if use_neutron:
+ floatingip = floating_ip_client.create_floatingip(
+ floating_network_id=floating_network_id)
+ # validation_resources['floating_ip'] has historically looked
+ # like a compute API POST /os-floating-ips response, so we need
+ # to mangle it a bit for a Neutron response with different
+ # fields.
+ validation_data['floating_ip'] = floatingip['floatingip']
+ validation_data['floating_ip']['ip'] = (
+ floatingip['floatingip']['floating_ip_address'])
+ else:
+ # NOTE(mriedem): The os-floating-ips compute API was deprecated
+ # in the 2.36 microversion. Any tests for CRUD operations on
+ # floating IPs using the compute API should be capped at 2.35.
+ validation_data.update(floating_ip_client.create_floating_ip(
+ pool=floating_network_name))
+ LOG.debug("Validation resource floating IP %s created",
+ validation_data['floating_ip'])
+ except Exception as prov_exc:
+ # If something goes wrong, cleanup as much as possible before we
+ # re-raise the exception
+ with excutils.save_and_reraise_exception():
+ if validation_data:
+ # Cleanup may fail as well
+ try:
+ msg = ('Error while provisioning validation resources %s. '
+ 'Trying to cleanup what we provisioned so far: %s')
+ # The exceptions logging is already handled, so using
+ # debug here just to provide more context
+ LOG.debug(msg, prov_exc, str(validation_data))
+ clear_validation_resources(
+ clients,
+ keypair=validation_data.get('keypair', None),
+ floating_ip=validation_data.get('floating_ip', None),
+ security_group=validation_data.get('security_group',
+ None),
+ use_neutron=use_neutron)
+ except Exception as cleanup_exc:
+ msg = ('Error during cleanup of validation resources. '
+ 'The cleanup was triggered by an exception during '
+ 'the provisioning step.\n'
+ 'Provisioning exception: %s\n'
+ 'First cleanup exception: %s')
+ LOG.exception(msg, prov_exc, cleanup_exc)
+ return validation_data
+
+
+def clear_validation_resources(clients, keypair=None, floating_ip=None,
+ security_group=None, use_neutron=True):
+ """Cleanup resources for VM ping/ssh testing
+
+ Cleanup a set of resources provisioned via `create_validation_resources`.
+ In case of errors during cleanup, the exception is logged and the cleanup
+ process is continued. The first exception that was raised is re-raised
+ after the cleanup is complete.
+
+ :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
+ or of a subclass of it. Resources are provisioned using clients from
+ `clients`.
+ :param keypair: A dictionary with the keypair to be deleted. Defaults to
+ None.
+ :param floating_ip: A dictionary with the floating_ip to be deleted.
+ Defaults to None.
+ :param security_group: A dictionary with the security_group to be deleted.
+ Defaults to None.
+ :param use_neutron: When True resources are provisioned via neutron, when
+ False resources are provisioned via nova.
+
+ Examples::
+
+ from tempest.lib.common import validation_resources as vr
+ from tempest.lib import auth
+ from tempest.lib.services import clients
+
+ creds = auth.get_credentials('http://mycloud/identity/v3',
+ username='me', project_name='me',
+ password='secret', domain_name='Default')
+ osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
+ # Request keypair and floating IP
+ resources = dict(keypair=True, security_group=False,
+ security_group_rules=False, floating_ip=True)
+ resources = vr.create_validation_resources(
+ osclients, use_neutron=True,
+ floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
+ **resources)
+
+ # Now cleanup the resources
+ try:
+ vr.clear_validation_resources(osclients, use_neutron=True,
+ **resources)
+ except Exception as e:
+ LOG.exception('Something went wrong during cleanup, ignoring')
+ """
+ has_exception = None
+ if keypair:
+ keypair_client = clients.compute.KeyPairsClient()
+ keypair_name = keypair['name']
+ try:
+ keypair_client.delete_keypair(keypair_name)
+ except lib_exc.NotFound:
+ LOG.warning(
+ "Keypair %s is not found when attempting to delete",
+ keypair_name
+ )
+ except Exception as exc:
+ LOG.exception('Exception raised while deleting key %s',
+ keypair_name)
+ if not has_exception:
+ has_exception = exc
+ network_service = _network_service(clients, use_neutron)
+ if security_group:
+ security_group_client = network_service.SecurityGroupsClient()
+ sec_id = security_group['id']
+ try:
+ security_group_client.delete_security_group(sec_id)
+ security_group_client.wait_for_resource_deletion(sec_id)
+ except lib_exc.NotFound:
+ LOG.warning("Security group %s is not found when attempting "
+ "to delete", sec_id)
+ except lib_exc.Conflict as exc:
+ LOG.exception('Conflict while deleting security '
+ 'group %s VM might not be deleted', sec_id)
+ if not has_exception:
+ has_exception = exc
+ except Exception as exc:
+ LOG.exception('Exception raised while deleting security '
+ 'group %s', sec_id)
+ if not has_exception:
+ has_exception = exc
+ if floating_ip:
+ floating_ip_client = network_service.FloatingIPsClient()
+ fip_id = floating_ip['id']
+ try:
+ if use_neutron:
+ floating_ip_client.delete_floatingip(fip_id)
+ else:
+ floating_ip_client.delete_floating_ip(fip_id)
+ except lib_exc.NotFound:
+ LOG.warning('Floating ip %s not found while attempting to '
+ 'delete', fip_id)
+ except Exception as exc:
+ LOG.exception('Exception raised while deleting ip %s', fip_id)
+ if not has_exception:
+ has_exception = exc
+ if has_exception:
+ raise has_exception
+
+
+class ValidationResourcesFixture(fixtures.Fixture):
+ """Fixture to provision and cleanup validation resources"""
+
+ DICT_KEYS = ['keypair', 'security_group', 'floating_ip']
+
+ def __init__(self, clients, keypair=False, floating_ip=False,
+ security_group=False, security_group_rules=False,
+ ethertype='IPv4', use_neutron=True, floating_network_id=None,
+ floating_network_name=None):
+ """Create a ValidationResourcesFixture
+
+ Create a ValidationResourcesFixture, which provisions the
+ resources required to be able to ping / ssh a virtual machine upon
+ setUp and clears them out upon cleanup. Resources are keypair,
+ security group, security group rules and a floating IP - depending
+ on the params.
+
+ The fixture exposes a dictionary that includes provisioned resources.
+
+ :param clients: Instance of `tempest.lib.services.clients.ServiceClients` or of a
+ subclass of it. Resources are provisioned using clients from
+ `clients`.
+ :param keypair: Whether to provision a keypair. Defaults to False.
+ :param floating_ip: Whether to provision a floating IP.
+ Defaults to False.
+ :param security_group: Whether to provision a security group.
+ Defaults to False.
+ :param security_group_rules: Whether to provision security group rules.
+ Defaults to False.
+ :param ethertype: 'IPv4' or 'IPv6'. Honoured only if neutron is used.
+ :param use_neutron: When True resources are provisioned via neutron,
+ when False resources are provisioned via nova.
+ :param floating_network_id: The id of the network used to provision a
+ floating IP. Only used if a floating IP is requested and neutron
+ is used.
+ :param floating_network_name: The name of the floating IP pool used to
+ provision the floating IP. Only used if a floating IP is requested
+ and with nova-net.
+ Provisioned resources are exposed through the `resources` property as
+ a dictionary, with 'keypair', 'security_group' and 'floating_ip' keys
+ and values in the format they are returned by the API.
+
+ Examples::
+
+ from tempest.lib.common import validation_resources as vr
+ from tempest.lib import auth
+ from tempest.lib.services import clients
+ import testtools
+
+
+ class TestWithVR(testtools.TestCase):
+
+ def setUp(self):
+ creds = auth.get_credentials(
+ 'http://mycloud/identity/v3',
+ username='me', project_name='me',
+ password='secret', domain_name='Default')
+
+ osclients = clients.ServiceClients(
+ creds, 'http://mycloud/identity/v3')
+ # Request keypair and floating IP
+ resources = dict(keypair=True, security_group=False,
+ security_group_rules=False,
+ floating_ip=True)
+ network_id = '4240E68E-23DA-4C82-AC34-9FEFAA24521C'
+ self.vr = self.useFixture(vr.ValidationResourcesFixture(
+ osclients, use_neutron=True,
+ floating_network_id=network_id,
+ **resources))
+
+ def test_use_ip(self):
+ # The floating IP to be attached to the VM
+ floating_ip = self.vr.resources['floating_ip']['ip']
+ """
+ self._clients = clients
+ self._keypair = keypair
+ self._floating_ip = floating_ip
+ self._security_group = security_group
+ self._security_group_rules = security_group_rules
+ self._ethertype = ethertype
+ self._use_neutron = use_neutron
+ self._floating_network_id = floating_network_id
+ self._floating_network_name = floating_network_name
+ self._validation_resources = None
+
+ def _setUp(self):
+ msg = ('Requested setup of ValidationResources keypair %s, floating '
+ 'IP %s, security group %s')
+ LOG.debug(msg, self._keypair, self._floating_ip, self._security_group)
+ self._validation_resources = create_validation_resources(
+ self._clients, keypair=self._keypair,
+ floating_ip=self._floating_ip,
+ security_group=self._security_group,
+ security_group_rules=self._security_group_rules,
+ ethertype=self._ethertype, use_neutron=self._use_neutron,
+ floating_network_id=self._floating_network_id,
+ floating_network_name=self._floating_network_name)
+ # If provisioning raises an exception we won't have anything to
+ # cleanup here, so we don't need a try-finally around provisioning
+ vr = self._validation_resources
+ self.addCleanup(clear_validation_resources, self._clients,
+ keypair=vr.get('keypair', None),
+ floating_ip=vr.get('floating_ip', None),
+ security_group=vr.get('security_group', None),
+ use_neutron=self._use_neutron)
+
+ @property
+ def resources(self):
+ return self._validation_resources
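
Besides the function-based workflow shown in the docstrings above, the new
fixture can be consumed directly. A short sketch, assumed to run inside a
testtools.TestCase method and re-using the placeholder clients and network id
from the docstring examples, with resources read back through the `resources`
property::

    from tempest.lib.common import validation_resources as vr

    # 'osclients' is a ServiceClients instance, as in the examples above
    fixture = self.useFixture(vr.ValidationResourcesFixture(
        osclients, keypair=True, floating_ip=True, security_group=True,
        security_group_rules=True, use_neutron=True,
        floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C'))
    # Cleanup is scheduled automatically by the fixture
    keypair_name = fixture.resources['keypair']['name']
    floating_ip = fixture.resources['floating_ip']['ip']
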
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
old mode 100755
new mode 100644
diff --git a/tempest/lib/services/volume/v2/encryption_types_client.py b/tempest/lib/services/volume/v2/encryption_types_client.py
old mode 100755
new mode 100644
diff --git a/tempest/test.py b/tempest/test.py
index a4cc2cc..7d95bcf 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,10 +26,10 @@
from tempest import clients
from tempest.common import credentials_factory as credentials
from tempest.common import utils
-import tempest.common.validation_resources as vresources
from tempest import config
from tempest.lib.common import cred_client
from tempest.lib.common import fixed_network
+from tempest.lib.common import validation_resources as vr
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -105,9 +105,16 @@
# a list of roles - the first element of the list being a label, and the
# rest the actual roles
credentials = []
+
+ # Network resources to be provisioned for the requested test credentials.
+ # Only used with the dynamic credentials provider.
+ _network_resources = {}
+
+ # Stack of resource cleanups
+ _class_cleanups = []
+
# Resources required to validate a server using ssh
- validation_resources = {}
- network_resources = {}
+ _validation_resources = {}
# NOTE(sdague): log_format is defined inline here instead of using the oslo
# default because going through the config path recouples config to the
@@ -122,7 +129,16 @@
TIMEOUT_SCALING_FACTOR = 1
@classmethod
+ def _reset_class(cls):
+ cls.__setup_credentials_called = False
+ cls.__resource_cleaup_called = False
+ cls.__skip_checks_called = False
+ cls._class_cleanups = []
+
+ @classmethod
def setUpClass(cls):
+ # Reset state
+ cls._reset_class()
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
@@ -131,10 +147,16 @@
cls.teardowns = []
# All the configuration checks that may generate a skip
cls.skip_checks()
+ if not cls.__skip_checks_called:
+ raise RuntimeError("skip_checks for %s did not call the super's "
+ "skip_checks" % cls.__name__)
try:
# Allocation of all required credentials and client managers
cls.teardowns.append(('credentials', cls.clear_credentials))
cls.setup_credentials()
+ if not cls.__setup_credentials_called:
+ raise RuntimeError("setup_credentials for %s did not call the "
+ "super's setup_credentials" % cls.__name__)
# Shortcuts to clients
cls.setup_clients()
# Additional class-wide test resources
@@ -171,12 +193,23 @@
# exception at the end
try:
teardown()
+ if name == 'resources':
+ if not cls.__resource_cleaup_called:
+ raise RuntimeError(
+ "resource_cleanup for %s did not call the "
+ "super's resource_cleanup" % cls.__name__)
except Exception as te:
sys_exec_info = sys.exc_info()
tetype = sys_exec_info[0]
- # TODO(andreaf): Till we have the ability to cleanup only
- # resources that were successfully setup in resource_cleanup,
- # log AttributeError as info instead of exception.
+ # TODO(andreaf): Resource cleanup is often implemented by
+ # storing an array of resources at class level, and cleaning
+ # them up during `resource_cleanup`.
+ # In case of failure during setup, some resource arrays might
+ # not be defined at all, in which case the cleanup code might
+ # trigger an AttributeError. In such cases we log
+ # AttributeError as info instead of exception. Once all
+ # cleanups are migrated to addClassResourceCleanup we can
+ # remove this.
if tetype is AttributeError and name == 'resources':
LOG.info("tearDownClass of %s failed: %s", name, te)
else:
@@ -212,13 +245,37 @@
"""Class level skip checks.
Subclasses verify in here all conditions that might prevent the
- execution of the entire test class.
- Checks implemented here may not make use API calls, and should rely on
- configuration alone.
- In general skip checks that require an API call are discouraged.
- If one is really needed it may be implemented either in the
- resource_setup or at test level.
+ execution of the entire test class. Skipping here prevents any other
+ class fixture from being executed i.e. no credentials or other
+ resource allocation will happen.
+
+ Tests defined in the test class will no longer appear in test results.
+ The `setUpClass` for the entire test class will be marked as SKIPPED
+ instead.
+
+ At this stage no test credentials are available, so skip checks
+ should rely on configuration alone. This is deliberate since skips
+ based on the result of an API call are discouraged.
+
+ The following checks are implemented in `test.py` already:
+ - check that alt credentials are available when requested by the test
+ - check that admin credentials are available when requested by the test
+ - check that the identity version specified by the test is marked as
+ enabled in the configuration
+
+ Overriders of skip_checks must always invoke skip_checks on `super`
+ first.
+
+ Example::
+
+ @classmethod
+ def skip_checks(cls):
+ super(Example, cls).skip_checks()
+ if not CONF.service_available.my_service:
+ skip_msg = ("%s skipped as my_service is not available")
+ raise cls.skipException(skip_msg % cls.__name__)
"""
+ cls.__skip_checks_called = True
identity_version = cls.get_identity_version()
# setting force_tenant_isolation to True also needs admin credentials.
if ('admin' in cls.credentials or
@@ -250,6 +307,7 @@
set_network_resources() method, otherwise it will create
network resources(network resources are created in a later step).
"""
+ cls.__setup_credentials_called = True
for credentials_type in cls.credentials:
# This may raise an exception in case credentials are not available
# In that case we want to let the exception through and the test
@@ -299,42 +357,129 @@
@classmethod
def resource_setup(cls):
- """Class level resource setup for test cases."""
- if (CONF.validation.ip_version_for_ssh not in (4, 6) and
- CONF.service_available.neutron):
- msg = "Invalid IP version %s in ip_version_for_ssh. Use 4 or 6"
- raise lib_exc.InvalidConfiguration(
- msg % CONF.validation.ip_version_for_ssh)
- if hasattr(cls, "os_primary"):
- vr = cls.validation_resources
- cls.validation_resources = vresources.create_validation_resources(
- cls.os_primary,
- use_neutron=CONF.service_available.neutron,
- ethertype='IPv' + str(CONF.validation.ip_version_for_ssh),
- floating_network_id=CONF.network.public_network_id,
- floating_network_name=CONF.network.floating_network_name,
- **vr)
- else:
- LOG.warning("Client manager not found, validation resources not"
- " created")
+ """Class level resource setup for test cases.
+
+ `resource_setup` is invoked once all credentials (and related network
+ resources) have been provisioned and after client aliases - if any -
+ have been defined.
+
+ The use case for `resource_setup` is test optimization: provisioning
+ of project-specific "expensive" resources that are not dirtied by tests
+ and can thus safely be re-used by multiple tests.
+
+ System wide resources shared by all tests could instead be provisioned
+ only once, before the test run.
+
+ Resources provisioned here must be cleaned up during
+ `resource_cleanup`. This is best achieved by scheduling a cleanup via
+ `addClassResourceCleanup`.
+
+ Some test resources have an asynchronous delete process. It's best
+ practice for them to schedule a wait for delete via
+ `addClassResourceCleanup` to avoid having resources in the process of
+ deletion when we reach the credentials cleanup step.
+
+ Example::
+
+ @classmethod
+ def resource_setup(cls):
+ super(MyTest, cls).resource_setup()
+ servers = cls.os_primary.compute.ServersClient()
+ # Schedule delete and wait so that we can first delete the
+ # two servers and then wait for both to delete
+ # Create server 1
+ cls.shared_server = servers.create_server()
+ # Create server 2. If something goes wrong we schedule cleanup
+ # of server 1 anyways.
+ try:
+ cls.shared_server2 = servers.create_server()
+ # Wait server 2
+ cls.addClassResourceCleanup(
+ waiters.wait_for_server_termination,
+ servers, cls.shared_server2['id'],
+ ignore_error=False)
+ finally:
+ # Wait server 1
+ cls.addClassResourceCleanup(
+ waiters.wait_for_server_termination,
+ servers, cls.shared_server['id'],
+ ignore_error=False)
+ # Delete server 1
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ servers.delete_server,
+ cls.shared_server['id'])
+ # Delete server 2 (if it was created)
+ if hasattr(cls, 'shared_server2'):
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ servers.delete_server,
+ cls.shared_server2['id'])
+ """
+ pass
@classmethod
def resource_cleanup(cls):
"""Class level resource cleanup for test cases.
- Resource cleanup must be able to handle the case of partially setup
- resources, in case a failure during `resource_setup` should happen.
+ Resource cleanup processes the stack of cleanups produced by
+ `addClassResourceCleanup` and then cleans up validation resources
+ if any were provisioned.
+
+ All cleanups are processed regardless of the outcome. Exceptions are
+ accumulated and re-raised as a `MultipleExceptions` at the end.
+
+ In most cases test cases won't need to override `resource_cleanup`,
+ but if they do they must invoke `resource_cleanup` on super.
+
+ Example::
+
+ class TestWithReallyComplexCleanup(test.BaseTestCase):
+
+ @classmethod
+ def resource_setup(cls):
+ # provision resource A
+ cls.addClassResourceCleanup(delete_resource, A)
+ # provision resource B
+ cls.addClassResourceCleanup(delete_resource, B)
+
+ @classmethod
+ def resource_cleanup(cls):
+ # It's possible to override resource_cleanup but in most
+ # cases it shouldn't be required. Nothing that may fail
+ # should be executed before the call to super since it
+ # might cause resource leak in case of error.
+ super(TestWithReallyComplexCleanup, cls).resource_cleanup()
+ # At this point test credentials are still available but
+ # anything from the cleanup stack has been already deleted.
"""
- if cls.validation_resources:
- if hasattr(cls, "os_primary"):
- vr = cls.validation_resources
- vresources.clear_validation_resources(
- cls.os_primary,
- use_neutron=CONF.service_available.neutron, **vr)
- cls.validation_resources = {}
- else:
- LOG.warning("Client manager not found, validation resources "
- "not deleted")
+ cls.__resource_cleaup_called = True
+ cleanup_errors = []
+ while cls._class_cleanups:
+ try:
+ fn, args, kwargs = cls._class_cleanups.pop()
+ fn(*args, **kwargs)
+ except Exception:
+ cleanup_errors.append(sys.exc_info())
+ if cleanup_errors:
+ raise testtools.MultipleExceptions(*cleanup_errors)
+
+ @classmethod
+ def addClassResourceCleanup(cls, fn, *arguments, **keywordArguments):
+ """Add a cleanup function to be called during resource_cleanup.
+
+ Functions added with addClassResourceCleanup will be called in reverse
+ order of adding at the beginning of resource_cleanup, before any
+ credential, networking or validation resources cleanup is processed.
+
+ If a function added with addClassResourceCleanup raises an exception,
+ the error will be recorded as a test error, and the next cleanup will
+ then be run.
+
+ Cleanup functions are always called during the test class tearDown
+ fixture, even if an exception occurred during setUp or tearDown.
+ """
+ cls._class_cleanups.append((fn, arguments, keywordArguments))
def setUp(self):
super(BaseTestCase, self).setUp()
@@ -421,7 +566,7 @@
False)
cls._creds_provider = credentials.get_credentials_provider(
- name=cls.__name__, network_resources=cls.network_resources,
+ name=cls.__name__, network_resources=cls._network_resources,
force_tenant_isolation=force_tenant_isolation)
return cls._creds_provider
@@ -476,62 +621,128 @@
if hasattr(cls, '_creds_provider'):
cls._creds_provider.clear_creds()
+ @staticmethod
+ def _validation_resources_params_from_conf():
+ return dict(
+ keypair=(CONF.validation.auth_method.lower() == "keypair"),
+ floating_ip=(CONF.validation.connect_method.lower() == "floating"),
+ security_group=CONF.validation.security_group,
+ security_group_rules=CONF.validation.security_group_rules,
+ use_neutron=CONF.service_available.neutron,
+ ethertype='IPv' + str(CONF.validation.ip_version_for_ssh),
+ floating_network_id=CONF.network.public_network_id,
+ floating_network_name=CONF.network.floating_network_name)
+
@classmethod
- def set_validation_resources(cls, keypair=None, floating_ip=None,
- security_group=None,
- security_group_rules=None):
- """Specify which ssh server validation resources should be created.
+ def get_class_validation_resources(cls, os_clients):
+ """Provision validation resources according to configuration
- Each of the argument must be set to either None, True or False, with
- None - use default from config (security groups and security group
- rules get created when set to None)
- False - Do not create the validation resource
- True - create the validation resource
+ This is a wrapper around `create_validation_resources` from
+ `tempest.lib.common.validation_resources` that passes parameters from
+ Tempest configuration. Only one instance of class level
+ validation resources is managed by the helper, so if resources
+ were already provisioned before, the existing ones are returned.
- @param keypair
- @param security_group
- @param security_group_rules
- @param floating_ip
+ Resources are returned as a dictionary. They are also scheduled for
+ automatic cleanup during class teardown using
+ `addClassResourceCleanup`.
+
+ If `CONF.validation.run_validation` is False no resource will be
+ provisioned at all.
+
+ @param os_clients: Clients to be used to provision the resources.
"""
if not CONF.validation.run_validation:
return
- if keypair is None:
- keypair = (CONF.validation.auth_method.lower() == "keypair")
+ if os_clients in cls._validation_resources:
+ return cls._validation_resources[os_clients]
- if floating_ip is None:
- floating_ip = (CONF.validation.connect_method.lower() ==
- "floating")
+ if (CONF.validation.ip_version_for_ssh not in (4, 6) and
+ CONF.service_available.neutron):
+ msg = "Invalid IP version %s in ip_version_for_ssh. Use 4 or 6"
+ raise lib_exc.InvalidConfiguration(
+ msg % CONF.validation.ip_version_for_ssh)
- if security_group is None:
- security_group = CONF.validation.security_group
+ resources = vr.create_validation_resources(
+ os_clients,
+ **cls._validation_resources_params_from_conf())
- if security_group_rules is None:
- security_group_rules = CONF.validation.security_group_rules
+ cls.addClassResourceCleanup(
+ vr.clear_validation_resources, os_clients,
+ use_neutron=CONF.service_available.neutron,
+ **resources)
+ cls._validation_resources[os_clients] = resources
+ return resources
- if not cls.validation_resources:
- cls.validation_resources = {
- 'keypair': keypair,
- 'security_group': security_group,
- 'security_group_rules': security_group_rules,
- 'floating_ip': floating_ip}
+ def get_test_validation_resources(self, os_clients):
+ """Returns a dict of validation resources according to configuration
+
+ Initialise a validation resources fixture based on configuration.
+ Start the fixture and return the validation resources.
+
+ If `CONF.validation.run_validation` is False no resource will be
+ provisioned at all.
+
+ @param os_clients: Clients to be used to provision the resources.
+ """
+
+ params = {}
+ # Test will try to use the fixture, so for this to be useful
+ # we must return a fixture. If validation is disabled though
+ # we don't need to provision anything, which is the default
+ # behavior for the fixture.
+ if CONF.validation.run_validation:
+ params = self._validation_resources_params_from_conf()
+
+ validation = self.useFixture(
+ vr.ValidationResourcesFixture(os_clients, **params))
+ return validation.resources
@classmethod
def set_network_resources(cls, network=False, router=False, subnet=False,
dhcp=False):
"""Specify which network resources should be created
+ The dynamic credentials provider by default provisions network
+ resources for each user/project that is provisioned. This behavior
+ can be altered using this method, which allows tests to define which
+ specific network resources to be provisioned - none if no parameter
+ is specified.
+
+ Credentials are provisioned as part of the class setup fixture,
+ during the `setup_credentials` step. For this to be effective this
+ helper must be invoked before super's `setup_credentials` is executed.
+
@param network
@param router
@param subnet
@param dhcp
+
+ Example::
+
+ @classmethod
+ def setup_credentials(cls):
+ # Do not setup network resources for this test
+ cls.set_network_resources()
+ super(MyTest, cls).setup_credentials()
"""
- # network resources should be set only once from callers
+ # If this is invoked after the credentials are set up, it won't have
+ # any effect. To avoid this situation, fail the test in case this was
+ # invoked too late in the test lifecycle.
+ if cls.__setup_credentials_called:
+ raise RuntimeError(
+ "set_network_resources invoked after setup_credentials on the "
+ "super class has been already invoked. For "
+ "set_network_resources to have effect please invoke it before "
+ "the call to super().setup_credentials")
+
+ # Network resources should be set only once from callers
# in order to ensure that even if it's called multiple times in
# a chain of overloaded methods, the attribute is set only
- # in the leaf class
- if not cls.network_resources:
- cls.network_resources = {
+ # in the leaf class.
+ if not cls._network_resources:
+ cls._network_resources = {
'network': network,
'router': router,
'subnet': subnet,
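
Putting the new class-level hooks together, a hedged end-to-end sketch of a
test class; the `my_service` service, its configuration flag and the
`WidgetsClient` calls are illustrative placeholders, not part of Tempest::

    from tempest import config
    from tempest.lib.common.utils import test_utils
    from tempest import test

    CONF = config.CONF


    class MyServiceTest(test.BaseTestCase):

        credentials = ['primary']

        @classmethod
        def skip_checks(cls):
            super(MyServiceTest, cls).skip_checks()
            if not CONF.service_available.my_service:
                raise cls.skipException("my_service is not available")

        @classmethod
        def setup_credentials(cls):
            # No project networks are needed for this class
            cls.set_network_resources()
            super(MyServiceTest, cls).setup_credentials()

        @classmethod
        def resource_setup(cls):
            super(MyServiceTest, cls).resource_setup()
            # Class-level validation resources, cleaned up automatically
            cls.validation = cls.get_class_validation_resources(
                cls.os_primary)
            # A shared resource, deleted in reverse order at cleanup time
            cls.widgets = cls.os_primary.my_service.WidgetsClient()
            cls.widget = cls.widgets.create_widget()
            cls.addClassResourceCleanup(
                test_utils.call_and_ignore_notfound_exc,
                cls.widgets.delete_widget, cls.widget['id'])

        def test_widget_reachable(self):
            # Per-test validation resources via the fixture-based helper
            resources = self.get_test_validation_resources(self.os_primary)
            self.assertIsNotNone(resources)
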
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index 6345728..5024100 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -37,14 +37,16 @@
fake_image = mock.Mock(response={'location': image_id})
compute_images_client.create_image.return_value = fake_image
# call the utility method
- image = compute_base.BaseV2ComputeTest.create_image_from_server(
- mock.sentinel.server_id, name='fake-snapshot-name')
+ cleanup_path = 'tempest.test.BaseTestCase.addClassResourceCleanup'
+ with mock.patch(cleanup_path) as mock_cleanup:
+ image = compute_base.BaseV2ComputeTest.create_image_from_server(
+ mock.sentinel.server_id, name='fake-snapshot-name')
self.assertEqual(fake_image, image)
# make our assertions
compute_images_client.create_image.assert_called_once_with(
mock.sentinel.server_id, name='fake-snapshot-name')
- self.assertEqual(1, len(compute_base.BaseV2ComputeTest.images))
- self.assertEqual(image_id, compute_base.BaseV2ComputeTest.images[0])
+ mock_cleanup.assert_called_once()
+ self.assertIn(image_id, mock_cleanup.call_args[0])
@mock.patch.multiple(compute_base.BaseV2ComputeTest,
compute_images_client=mock.DEFAULT,
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index 6aa7a42..ebcf5d1 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -40,6 +40,7 @@
from tempest.tests import fake_config
from tempest.tests.lib import fake_http
from tempest.tests.lib import fake_identity
+from tempest.tests.lib.services import registry_fixture
class TestDynamicCredentialProvider(base.TestCase):
@@ -62,6 +63,7 @@
def setUp(self):
super(TestDynamicCredentialProvider, self).setUp()
self.useFixture(fake_config.ConfigFixture())
+ self.useFixture(registry_fixture.RegistryFixture())
self.patchobject(config, 'TempestConfigPrivate',
fake_config.FakePrivate)
self.patchobject(self.token_client_class, 'raw_request',
diff --git a/tempest/tests/lib/common/test_preprov_creds.py b/tempest/tests/lib/common/test_preprov_creds.py
index 5402e47..9b10159 100644
--- a/tempest/tests/lib/common/test_preprov_creds.py
+++ b/tempest/tests/lib/common/test_preprov_creds.py
@@ -32,6 +32,7 @@
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests.lib import fake_identity
+from tempest.tests.lib.services import registry_fixture
class TestPreProvisionedCredentials(base.TestCase):
@@ -92,9 +93,8 @@
return_value=self.test_accounts))
self.useFixture(fixtures.MockPatch(
'os.path.isfile', return_value=True))
- # NOTE(andreaf) Ensure config is loaded so service clients are
- # registered in the registry before tests
- config.service_client_config()
+ # Make sure we leave the registry clean
+ self.useFixture(registry_fixture.RegistryFixture())
def tearDown(self):
super(TestPreProvisionedCredentials, self).tearDown()
diff --git a/tempest/tests/lib/common/test_validation_resources.py b/tempest/tests/lib/common/test_validation_resources.py
new file mode 100644
index 0000000..d5139f4
--- /dev/null
+++ b/tempest/tests/lib/common/test_validation_resources.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2017 IBM Corp.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+import mock
+import testtools
+
+from tempest.lib.common import validation_resources as vr
+from tempest.lib import exceptions as lib_exc
+from tempest.lib.services import clients
+from tempest.tests import base
+from tempest.tests.lib import fake_credentials
+from tempest.tests.lib.services import registry_fixture
+
+FAKE_SECURITY_GROUP = {'security_group': {'id': 'sg_id'}}
+FAKE_KEYPAIR = {'keypair': {'name': 'keypair_name'}}
+FAKE_FIP_NOVA_NET = {'floating_ip': {'ip': '1.2.3.4', 'id': '1234'}}
+FAKE_FIP_NEUTRON = {'floatingip': {'floating_ip_address': '1.2.3.4',
+ 'id': '1234'}}
+
+SERVICES = 'tempest.lib.services'
+SG_CLIENT = (SERVICES + '.%s.security_groups_client.SecurityGroupsClient.%s')
+SGR_CLIENT = (SERVICES + '.%s.security_group_rules_client.'
+ 'SecurityGroupRulesClient.create_security_group_rule')
+KP_CLIENT = (SERVICES + '.compute.keypairs_client.KeyPairsClient.%s')
+FIP_CLIENT = (SERVICES + '.%s.floating_ips_client.FloatingIPsClient.%s')
+
+
+class TestValidationResources(base.TestCase):
+
+ def setUp(self):
+ super(TestValidationResources, self).setUp()
+ self.useFixture(registry_fixture.RegistryFixture())
+ self.mock_sg_compute = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('compute', 'create_security_group'), autospec=True,
+ return_value=FAKE_SECURITY_GROUP))
+ self.mock_sg_network = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('network', 'create_security_group'), autospec=True,
+ return_value=FAKE_SECURITY_GROUP))
+ self.mock_sgr_compute = self.useFixture(fixtures.MockPatch(
+ SGR_CLIENT % 'compute', autospec=True))
+ self.mock_sgr_network = self.useFixture(fixtures.MockPatch(
+ SGR_CLIENT % 'network', autospec=True))
+ self.mock_kp = self.useFixture(fixtures.MockPatch(
+ KP_CLIENT % 'create_keypair', autospec=True,
+ return_value=FAKE_KEYPAIR))
+ self.mock_fip_compute = self.useFixture(fixtures.MockPatch(
+ FIP_CLIENT % ('compute', 'create_floating_ip'), autospec=True,
+ return_value=FAKE_FIP_NOVA_NET))
+ self.mock_fip_network = self.useFixture(fixtures.MockPatch(
+ FIP_CLIENT % ('network', 'create_floatingip'), autospec=True,
+ return_value=FAKE_FIP_NEUTRON))
+ self.os = clients.ServiceClients(
+ fake_credentials.FakeKeystoneV3Credentials(), 'fake_uri')
+
+ def test_create_ssh_security_group_nova_net(self):
+ expected_sg_id = FAKE_SECURITY_GROUP['security_group']['id']
+ sg = vr.create_ssh_security_group(self.os, add_rule=True,
+ use_neutron=False)
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+ # Neutron clients have not been used
+ self.assertEqual(self.mock_sg_network.mock.call_count, 0)
+ self.assertEqual(self.mock_sgr_network.mock.call_count, 0)
+ # Nova-net clients assertions
+ self.assertGreater(self.mock_sg_compute.mock.call_count, 0)
+ self.assertGreater(self.mock_sgr_compute.mock.call_count, 0)
+ for call in self.mock_sgr_compute.mock.call_args_list[1:]:
+ self.assertIn(expected_sg_id, call[1].values())
+
+ def test_create_ssh_security_group_neutron(self):
+ expected_sg_id = FAKE_SECURITY_GROUP['security_group']['id']
+ expected_ethertype = 'fake_ethertype'
+ sg = vr.create_ssh_security_group(self.os, add_rule=True,
+ use_neutron=True,
+ ethertype=expected_ethertype)
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+ # Nova-net clients have not been used
+ self.assertEqual(self.mock_sg_compute.mock.call_count, 0)
+ self.assertEqual(self.mock_sgr_compute.mock.call_count, 0)
+ # Neutron clients assertions
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ self.assertGreater(self.mock_sgr_network.mock.call_count, 0)
+ # Check SG ID and ethertype are passed down to rules
+ for call in self.mock_sgr_network.mock.call_args_list[1:]:
+ self.assertIn(expected_sg_id, call[1].values())
+ self.assertIn(expected_ethertype, call[1].values())
+
+ def test_create_ssh_security_no_rules(self):
+ sg = vr.create_ssh_security_group(self.os, add_rule=False)
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'], sg)
+ # SG Rules clients have not been used
+ self.assertEqual(self.mock_sgr_compute.mock.call_count, 0)
+ self.assertEqual(self.mock_sgr_network.mock.call_count, 0)
+
+ @mock.patch.object(vr, 'create_ssh_security_group',
+ return_value=FAKE_SECURITY_GROUP['security_group'])
+ def test_create_validation_resources_nova_net(self, mock_create_sg):
+ expected_floating_network_id = 'my_fni'
+ expected_floating_network_name = 'my_fnn'
+ resources = vr.create_validation_resources(
+ self.os, keypair=True, floating_ip=True, security_group=True,
+ security_group_rules=True, ethertype='IPv6', use_neutron=False,
+ floating_network_id=expected_floating_network_id,
+ floating_network_name=expected_floating_network_name)
+ # Keypair calls
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ # Floating IP calls
+ self.assertGreater(self.mock_fip_compute.mock.call_count, 0)
+ for call in self.mock_fip_compute.mock.call_args_list[1:]:
+ self.assertIn(expected_floating_network_name, call[1].values())
+ self.assertNotIn(expected_floating_network_id, call[1].values())
+ self.assertEqual(self.mock_fip_network.mock.call_count, 0)
+ # SG calls
+ mock_create_sg.assert_called_once()
+ # Resources
+ for resource in ['keypair', 'floating_ip', 'security_group']:
+ self.assertIn(resource, resources)
+ self.assertEqual(FAKE_KEYPAIR['keypair'], resources['keypair'])
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'],
+ resources['security_group'])
+ self.assertEqual(FAKE_FIP_NOVA_NET['floating_ip'],
+ resources['floating_ip'])
+
+ @mock.patch.object(vr, 'create_ssh_security_group',
+ return_value=FAKE_SECURITY_GROUP['security_group'])
+ def test_create_validation_resources_neutron(self, mock_create_sg):
+ expected_floating_network_id = 'my_fni'
+ expected_floating_network_name = 'my_fnn'
+ resources = vr.create_validation_resources(
+ self.os, keypair=True, floating_ip=True, security_group=True,
+ security_group_rules=True, ethertype='IPv6', use_neutron=True,
+ floating_network_id=expected_floating_network_id,
+ floating_network_name=expected_floating_network_name)
+ # Keypair calls
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ # Floating IP calls
+ self.assertEqual(self.mock_fip_compute.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+ for call in self.mock_fip_network.mock.call_args_list[1:]:
+ self.assertIn(expected_floating_network_id, call[1].values())
+ self.assertNotIn(expected_floating_network_name, call[1].values())
+ # SG calls
+ mock_create_sg.assert_called_once()
+ # Resources
+ for resource in ['keypair', 'floating_ip', 'security_group']:
+ self.assertIn(resource, resources)
+ self.assertEqual(FAKE_KEYPAIR['keypair'], resources['keypair'])
+ self.assertEqual(FAKE_SECURITY_GROUP['security_group'],
+ resources['security_group'])
+ self.assertIn('ip', resources['floating_ip'])
+ self.assertEqual(resources['floating_ip']['ip'],
+ FAKE_FIP_NEUTRON['floatingip']['floating_ip_address'])
+ self.assertEqual(resources['floating_ip']['id'],
+ FAKE_FIP_NEUTRON['floatingip']['id'])
+
+
+class TestClearValidationResourcesFixture(base.TestCase):
+
+ def setUp(self):
+ super(TestClearValidationResourcesFixture, self).setUp()
+ self.useFixture(registry_fixture.RegistryFixture())
+ self.mock_sg_compute = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('compute', 'delete_security_group'), autospec=True))
+ self.mock_sg_network = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('network', 'delete_security_group'), autospec=True))
+ self.mock_sg_wait_compute = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('compute', 'wait_for_resource_deletion'),
+ autospec=True))
+ self.mock_sg_wait_network = self.useFixture(fixtures.MockPatch(
+ SG_CLIENT % ('network', 'wait_for_resource_deletion'),
+ autospec=True))
+ self.mock_kp = self.useFixture(fixtures.MockPatch(
+ KP_CLIENT % 'delete_keypair', autospec=True))
+ self.mock_fip_compute = self.useFixture(fixtures.MockPatch(
+ FIP_CLIENT % ('compute', 'delete_floating_ip'), autospec=True))
+ self.mock_fip_network = self.useFixture(fixtures.MockPatch(
+ FIP_CLIENT % ('network', 'delete_floatingip'), autospec=True))
+ self.os = clients.ServiceClients(
+ fake_credentials.FakeKeystoneV3Credentials(), 'fake_uri')
+
+ def test_clear_validation_resources_nova_net(self):
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NOVA_NET['floating_ip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=False)
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ for call in self.mock_kp.mock.call_args_list[1:]:
+ self.assertIn(FAKE_KEYPAIR['keypair']['name'], call[1].values())
+ self.assertGreater(self.mock_sg_compute.mock.call_count, 0)
+ for call in self.mock_sg_compute.mock.call_args_list[1:]:
+ self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+ call[1].values())
+ self.assertGreater(self.mock_sg_wait_compute.mock.call_count, 0)
+ for call in self.mock_sg_wait_compute.mock.call_args_list[1:]:
+ self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+ call[1].values())
+ self.assertEqual(self.mock_sg_network.mock.call_count, 0)
+ self.assertEqual(self.mock_sg_wait_network.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_compute.mock.call_count, 0)
+ for call in self.mock_fip_compute.mock.call_args_list[1:]:
+ self.assertIn(FAKE_FIP_NOVA_NET['floating_ip']['id'],
+ call[1].values())
+ self.assertEqual(self.mock_fip_network.mock.call_count, 0)
+
+ def test_clear_validation_resources_neutron(self):
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=True)
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ for call in self.mock_kp.mock.call_args_list[1:]:
+ self.assertIn(FAKE_KEYPAIR['keypair']['name'], call[1].values())
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ for call in self.mock_sg_network.mock.call_args_list[1:]:
+ self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+ call[1].values())
+ self.assertGreater(self.mock_sg_wait_network.mock.call_count, 0)
+ for call in self.mock_sg_wait_network.mock.call_args_list[1:]:
+ self.assertIn(FAKE_SECURITY_GROUP['security_group']['id'],
+ call[1].values())
+ self.assertEqual(self.mock_sg_compute.mock.call_count, 0)
+ self.assertEqual(self.mock_sg_wait_compute.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+ for call in self.mock_fip_network.mock.call_args_list[1:]:
+ self.assertIn(FAKE_FIP_NEUTRON['floatingip']['id'],
+ call[1].values())
+ self.assertEqual(self.mock_fip_compute.mock.call_count, 0)
+
+ def test_clear_validation_resources_exceptions(self):
+ # Test that even with exceptions all cleanups are invoked and that only
+ # the first exception is reported.
+ # NOTE(andreaf) There's no way of knowing which exception is going to
+ # be raised first unless we enforce which resource is cleared first;
+ # that's not really interesting, and not harmful either. Keypair first.
+ self.mock_kp.mock.side_effect = Exception('keypair exception')
+ self.mock_sg_network.mock.side_effect = Exception('sg exception')
+ self.mock_fip_network.mock.side_effect = Exception('fip exception')
+ with testtools.ExpectedException(Exception, value_re='keypair'):
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=True)
+ # Delete calls are still made for all resources, but not the wait calls
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+ def test_clear_validation_resources_wait_not_found_wait(self):
+ # Test that a NotFound on wait does not raise an exception
+ self.mock_sg_wait_network.mock.side_effect = lib_exc.NotFound('yay')
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=True)
+ # All client calls are still made, including the wait call
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ self.assertGreater(self.mock_sg_wait_network.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+ def test_clear_validation_resources_wait_not_found_delete(self):
+ # Test that a NotFound on delete does not raise an exception
+ self.mock_kp.mock.side_effect = lib_exc.NotFound('yay')
+ self.mock_sg_network.mock.side_effect = lib_exc.NotFound('yay')
+ self.mock_fip_network.mock.side_effect = lib_exc.NotFound('yay')
+ vr.clear_validation_resources(
+ self.os,
+ floating_ip=FAKE_FIP_NEUTRON['floatingip'],
+ security_group=FAKE_SECURITY_GROUP['security_group'],
+ keypair=FAKE_KEYPAIR['keypair'],
+ use_neutron=True)
+ # Delete calls are still made, but the wait call is skipped on NotFound
+ self.assertGreater(self.mock_kp.mock.call_count, 0)
+ self.assertGreater(self.mock_sg_network.mock.call_count, 0)
+ self.assertEqual(self.mock_sg_wait_network.mock.call_count, 0)
+ self.assertGreater(self.mock_fip_network.mock.call_count, 0)
+
+
+class TestValidationResourcesFixture(base.TestCase):
+
+ @mock.patch.object(vr, 'create_validation_resources', autospec=True)
+ def test_use_fixture(self, mock_vr):
+ exp_vr = dict(keypair='keypair',
+ floating_ip='floating_ip',
+ security_group='security_group')
+ mock_vr.return_value = exp_vr
+ exp_clients = 'clients'
+ exp_parameters = dict(keypair=True, floating_ip=True,
+ security_group=True, security_group_rules=True,
+ ethertype='v6', use_neutron=True,
+ floating_network_id='fnid',
+ floating_network_name='fnname')
+ # First mock the cleanup function
+ self.useFixture(fixtures.MockPatchObject(
+ vr, 'clear_validation_resources', autospec=True))
+ # and then use the vr fixture, so that when the fixture is cleaned up
+ # the mock is still in place
+ vr_fixture = self.useFixture(vr.ValidationResourcesFixture(
+ exp_clients, **exp_parameters))
+ # Assert vr have been provisioned
+ mock_vr.assert_called_once_with(exp_clients, **exp_parameters)
+ # Assert vr have been set up in the fixture
+ self.assertEqual(exp_vr, vr_fixture.resources)
+
+ @mock.patch.object(vr, 'clear_validation_resources', autospec=True)
+ @mock.patch.object(vr, 'create_validation_resources', autospec=True)
+ def test_use_fixture_context(self, mock_vr, mock_clear):
+ exp_vr = dict(keypair='keypair',
+ floating_ip='floating_ip',
+ security_group='security_group')
+ mock_vr.return_value = exp_vr
+ exp_clients = 'clients'
+ exp_parameters = dict(keypair=True, floating_ip=True,
+ security_group=True, security_group_rules=True,
+ ethertype='v6', use_neutron=True,
+ floating_network_id='fnid',
+ floating_network_name='fnname')
+ with vr.ValidationResourcesFixture(exp_clients,
+ **exp_parameters) as vr_fixture:
+ # Assert vr have been provisioned
+ mock_vr.assert_called_once_with(exp_clients, **exp_parameters)
+ # Assert vr have been set up in the fixture
+ self.assertEqual(exp_vr, vr_fixture.resources)
+ # After context manager is closed, clear is invoked
+ exp_vr['use_neutron'] = exp_parameters['use_neutron']
+ mock_clear.assert_called_once_with(exp_clients, **exp_vr)
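
(Aside, not part of the patch: a minimal sketch of how the ValidationResourcesFixture exercised above is typically consumed. The credentials, URI and floating network values are illustrative placeholders; the keyword arguments and the `resources` keys mirror the tests in this file.)

    from tempest.lib.common import validation_resources as vr
    from tempest.lib.services import clients

    # Assuming `creds` holds valid credentials and `uri` an identity endpoint
    service_clients = clients.ServiceClients(creds, uri)

    # Inside a fixtures-aware test case, provision a keypair, an SSH security
    # group with rules and a floating IP in one go
    vr_fixture = self.useFixture(vr.ValidationResourcesFixture(
        service_clients, keypair=True, floating_ip=True,
        security_group=True, security_group_rules=True,
        use_neutron=True, ethertype='IPv6',
        floating_network_id='fake-net-id',
        floating_network_name='fake-net-name'))
    # Provisioned resources are exposed as a dict; cleanup happens
    # automatically when the fixture is disposed of
    keypair = vr_fixture.resources['keypair']
    floating_ip = vr_fixture.resources['floating_ip']
    security_group = vr_fixture.resources['security_group']
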
diff --git a/tempest/tests/lib/services/registry_fixture.py b/tempest/tests/lib/services/registry_fixture.py
new file mode 100644
index 0000000..8484209
--- /dev/null
+++ b/tempest/tests/lib/services/registry_fixture.py
@@ -0,0 +1,65 @@
+# Copyright 2017 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+
+from tempest.lib.services import clients
+
+
+class RegistryFixture(fixtures.Fixture):
+ """A fixture to setup a test client registry
+
+ The clients registry is a singleton. In Tempest it's filled with
+ content from configuration. When testing Tempest lib classes without
+ configuration it's handy to have the registry setup to be able to access
+ service client factories.
+
+ This fixture sets up the registry using a fake plugin, which includes all
+ services specified at __init__ time. Any other plugin in the registry
+ is removed at setUp time. The fake plugin is removed from the registry
+ on cleanup.
+ """
+
+ PLUGIN_NAME = 'fake_plugin_for_test'
+
+ def __init__(self):
+ """Initialise the registry fixture"""
+ self.services = set(['compute', 'identity.v2', 'identity.v3',
+ 'image.v1', 'image.v2', 'network', 'volume.v1',
+ 'volume.v2', 'volume.v3'])
+
+ def _setUp(self):
+ # Clean up the registry
+ registry = clients.ClientsRegistry()
+ registry._service_clients = {}
+ # Prepare the clients for registration
+ all_clients = []
+ service_clients = clients.tempest_modules()
+ for sc in self.services:
+ sc_module = service_clients[sc]
+ sc_unversioned = sc.split('.')[0]
+ sc_name = sc.replace('.', '_')
+ # Pass the bare minimum params to satisfy the clients interface
+ service_client_data = dict(
+ name=sc_name, service_version=sc, service=sc_unversioned,
+ module_path=sc_module.__name__,
+ client_names=sc_module.__all__)
+ all_clients.append(service_client_data)
+ registry.register_service_client(self.PLUGIN_NAME, all_clients)
+
+ def _cleanup():
+ del registry._service_clients[self.PLUGIN_NAME]
+
+ self.addCleanup(_cleanup)
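
(Aside, not part of the patch: a minimal sketch of how RegistryFixture is meant to be used in a unit test, mirroring its use in test_tempest_plugin.py below; the test class name is illustrative.)

    from tempest.tests import base
    from tempest.tests.lib.services import registry_fixture


    class FakeServiceClientTest(base.TestCase):

        def setUp(self):
            super(FakeServiceClientTest, self).setUp()
            # Replace whatever is in the singleton registry with the fake
            # plugin, and remove the fake plugin again on cleanup, so the
            # test leaves the registry in a clean state
            self.useFixture(registry_fixture.RegistryFixture())
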
diff --git a/tempest/tests/test_tempest_plugin.py b/tempest/tests/test_tempest_plugin.py
index 13e2499..ddadef5 100644
--- a/tempest/tests/test_tempest_plugin.py
+++ b/tempest/tests/test_tempest_plugin.py
@@ -17,9 +17,16 @@
from tempest.test_discover import plugins
from tempest.tests import base
from tempest.tests import fake_tempest_plugin as fake_plugin
+from tempest.tests.lib.services import registry_fixture
class TestPluginDiscovery(base.TestCase):
+
+ def setUp(self):
+ super(TestPluginDiscovery, self).setUp()
+ # Make sure we leave the registry clean
+ self.useFixture(registry_fixture.RegistryFixture())
+
def test_load_tests_with_one_plugin(self):
# we can't mock stevedore since it's a singleton and already executed
# during test discovery. So basically this test covers the plugin loop
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
new file mode 100644
index 0000000..ead0bd8
--- /dev/null
+++ b/tempest/tests/test_test.py
@@ -0,0 +1,428 @@
+# Copyright 2017 IBM Corp
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+import mock
+from oslo_config import cfg
+import testtools
+
+from tempest import clients
+from tempest import config
+from tempest.lib.common import validation_resources as vr
+from tempest.lib import exceptions as lib_exc
+from tempest import test
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests.lib import fake_credentials
+from tempest.tests.lib.services import registry_fixture
+
+
+if sys.version_info >= (2, 7):
+ import unittest
+else:
+ import unittest2 as unittest
+
+
+class LoggingTestResult(testtools.TestResult):
+
+ def __init__(self, log, *args, **kwargs):
+ super(LoggingTestResult, self).__init__(*args, **kwargs)
+ self.log = log
+
+ def addError(self, test, err=None, details=None):
+ self.log.append((test, err, details))
+
+
+class TestValidationResources(base.TestCase):
+
+ validation_resources_module = 'tempest.lib.common.validation_resources'
+
+ def setUp(self):
+ super(TestValidationResources, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.useFixture(registry_fixture.RegistryFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
+
+ class TestTestClass(test.BaseTestCase):
+ pass
+
+ self.test_test_class = TestTestClass
+
+ def test_validation_resources_no_validation(self):
+ cfg.CONF.set_default('run_validation', False, 'validation')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ vr = self.test_test_class.get_class_validation_resources(osclients)
+ self.assertIsNone(vr)
+
+ def test_validation_resources_exists(self):
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ expected_vr = 'expected_validation_resources'
+ self.test_test_class._validation_resources[osclients] = expected_vr
+ obtained_vr = self.test_test_class.get_class_validation_resources(
+ osclients)
+ self.assertEqual(expected_vr, obtained_vr)
+
+ @mock.patch(validation_resources_module + '.create_validation_resources',
+ autospec=True)
+ def test_validation_resources_new(self, mock_create_vr):
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ cfg.CONF.set_default('neutron', True, 'service_available')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ expected_vr = {'expected_validation_resources': None}
+ mock_create_vr.return_value = expected_vr
+ with mock.patch.object(
+ self.test_test_class,
+ 'addClassResourceCleanup') as mock_add_class_cleanup:
+ obtained_vr = self.test_test_class.get_class_validation_resources(
+ osclients)
+ self.assertEqual(1, mock_add_class_cleanup.call_count)
+ self.assertEqual(mock.call(vr.clear_validation_resources,
+ osclients,
+ use_neutron=True,
+ **expected_vr),
+ mock_add_class_cleanup.call_args)
+ self.assertEqual(mock_create_vr.call_count, 1)
+ self.assertIn(osclients, mock_create_vr.call_args_list[0][0])
+ self.assertEqual(expected_vr, obtained_vr)
+ self.assertIn(osclients, self.test_test_class._validation_resources)
+ self.assertEqual(expected_vr,
+ self.test_test_class._validation_resources[osclients])
+
+ def test_validation_resources_invalid_config(self):
+ invalid_version = 999
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ cfg.CONF.set_default('ip_version_for_ssh', invalid_version,
+ 'validation')
+ cfg.CONF.set_default('neutron', True, 'service_available')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ with testtools.ExpectedException(
+ lib_exc.InvalidConfiguration,
+ value_re='^.*\n.*' + str(invalid_version)):
+ self.test_test_class.get_class_validation_resources(osclients)
+
+ @mock.patch(validation_resources_module + '.create_validation_resources',
+ autospec=True)
+ def test_validation_resources_invalid_config_nova_net(self,
+ mock_create_vr):
+ invalid_version = 999
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ cfg.CONF.set_default('ip_version_for_ssh', invalid_version,
+ 'validation')
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ expected_vr = {'expected_validation_resources': None}
+ mock_create_vr.return_value = expected_vr
+ obtained_vr = self.test_test_class.get_class_validation_resources(
+ osclients)
+ self.assertEqual(mock_create_vr.call_count, 1)
+ self.assertIn(osclients, mock_create_vr.call_args_list[0][0])
+ self.assertEqual(expected_vr, obtained_vr)
+ self.assertIn(osclients, self.test_test_class._validation_resources)
+ self.assertEqual(expected_vr,
+ self.test_test_class._validation_resources[osclients])
+
+ @mock.patch(validation_resources_module + '.create_validation_resources',
+ autospec=True)
+ @mock.patch(validation_resources_module + '.clear_validation_resources',
+ autospec=True)
+ def test_validation_resources_fixture(self, mock_clean_vr, mock_create_vr):
+
+ class TestWithRun(self.test_test_class):
+
+ def runTest(self):
+ pass
+
+ cfg.CONF.set_default('run_validation', True, 'validation')
+ test_case = TestWithRun()
+ creds = fake_credentials.FakeKeystoneV3Credentials()
+ osclients = clients.Manager(creds)
+ test_case.get_test_validation_resources(osclients)
+ self.assertEqual(1, mock_create_vr.call_count)
+ self.assertEqual(0, mock_clean_vr.call_count)
+
+
+class TestSetNetworkResources(base.TestCase):
+
+ def setUp(self):
+ super(TestSetNetworkResources, self).setUp()
+
+ class ParentTest(test.BaseTestCase):
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(dhcp=True)
+ super(ParentTest, cls).setup_credentials()
+
+ def runTest(self):
+ pass
+
+ self.parent_class = ParentTest
+
+ def test_set_network_resources_child_only(self):
+
+ class ChildTest(self.parent_class):
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(router=True)
+ super(ChildTest, cls).setup_credentials()
+
+ child_test = ChildTest()
+ child_test.setUpClass()
+ # Assert that the parent's network resources are not set
+ self.assertFalse(child_test._network_resources['dhcp'])
+ # Assert that the child network resources are set
+ self.assertTrue(child_test._network_resources['router'])
+
+ def test_set_network_resources_right_order(self):
+
+ class ChildTest(self.parent_class):
+
+ @classmethod
+ def setup_credentials(cls):
+ super(ChildTest, cls).setup_credentials()
+ cls.set_network_resources(router=True)
+
+ child_test = ChildTest()
+ with testtools.ExpectedException(RuntimeError,
+ value_re='set_network_resources'):
+ child_test.setUpClass()
+
+ def test_set_network_resources_children(self):
+
+ class ChildTest(self.parent_class):
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(router=True)
+ super(ChildTest, cls).setup_credentials()
+
+ class GrandChildTest(ChildTest):
+ pass
+
+ # Invoke setUpClass on both and check that the setup_credentials
+ # call-check mechanism does not report any false negatives.
+ child_test = ChildTest()
+ child_test.setUpClass()
+ grandchild_test = GrandChildTest()
+ grandchild_test.setUpClass()
+
+
+class TestTempestBaseTestClass(base.TestCase):
+
+ def setUp(self):
+ super(TestTempestBaseTestClass, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.patchobject(config, 'TempestConfigPrivate',
+ fake_config.FakePrivate)
+
+ class ParentTest(test.BaseTestCase):
+
+ def runTest(self):
+ pass
+
+ self.parent_test = ParentTest
+
+ def test_resource_cleanup(self):
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ exp_args = (1, 2,)
+ exp_kwargs = {'a': 1, 'b': 2}
+ mock1 = mock.Mock()
+ mock2 = mock.Mock()
+ exp_functions = [mock1, mock2]
+
+ class TestWithCleanups(self.parent_test):
+
+ @classmethod
+ def resource_setup(cls):
+ for fn in exp_functions:
+ cls.addClassResourceCleanup(fn, *exp_args,
+ **exp_kwargs)
+
+ test_cleanups = TestWithCleanups()
+ suite = unittest.TestSuite((test_cleanups,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+ # No exception raised - error log is empty
+ self.assertFalse(log)
+ # All stacked resource cleanups invoked
+ mock1.assert_called_once_with(*exp_args, **exp_kwargs)
+ mock2.assert_called_once_with(*exp_args, **exp_kwargs)
+ # Cleanup stack is empty
+ self.assertEqual(0, len(test_cleanups._class_cleanups))
+
+ def test_resource_cleanup_failures(self):
+ cfg.CONF.set_default('neutron', False, 'service_available')
+ exp_args = (1, 2,)
+ exp_kwargs = {'a': 1, 'b': 2}
+ mock1 = mock.Mock()
+ mock1.side_effect = Exception('mock1 resource cleanup failure')
+ mock2 = mock.Mock()
+ mock3 = mock.Mock()
+ mock3.side_effect = Exception('mock3 resource cleanup failure')
+ exp_functions = [mock1, mock2, mock3]
+
+ class TestWithFailingCleanups(self.parent_test):
+
+ @classmethod
+ def resource_setup(cls):
+ for fn in exp_functions:
+ cls.addClassResourceCleanup(fn, *exp_args,
+ **exp_kwargs)
+
+ test_cleanups = TestWithFailingCleanups()
+ suite = unittest.TestSuite((test_cleanups,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+ # A single MultipleExceptions error is captured
+ self.assertEqual(1, len(log))
+ # log[0] is a (test, err, details) tuple; err is an exc_info tuple of
+ # (type, exception, traceback), so err[1] is the MultipleExceptions
+ found_exc = log[0][1][1]
+ self.assertIsInstance(found_exc, testtools.MultipleExceptions)
+ self.assertEqual(2, len(found_exc.args))
+ # Each arg is exc_info - match messages and order
+ self.assertIn('mock3 resource', str(found_exc.args[0][1]))
+ self.assertIn('mock1 resource', str(found_exc.args[1][1]))
+ # All stacked resource cleanups invoked
+ mock1.assert_called_once_with(*exp_args, **exp_kwargs)
+ mock2.assert_called_once_with(*exp_args, **exp_kwargs)
+ # Cleanup stack is empty
+ self.assertEqual(0, len(test_cleanups._class_cleanups))
+
+ def test_super_resource_cleanup_not_invoked(self):
+
+ class BadResourceCleanup(self.parent_test):
+
+ @classmethod
+ def resource_cleanup(cls):
+ pass
+
+ bad_class = BadResourceCleanup()
+ suite = unittest.TestSuite((bad_class,))
+ log = []
+ result = LoggingTestResult(log)
+ suite.run(result)
+ # A single exception is captured
+ self.assertEqual(1, len(log))
+ # log[0] is a (test, err, details) tuple; err is an exc_info tuple of
+ # (type, exception, traceback), so err[1] is the RuntimeError
+ found_exc = log[0][1][1]
+ self.assertIsInstance(found_exc, RuntimeError)
+ self.assertIn(BadResourceCleanup.__name__, str(found_exc))
+
+ def test_super_skip_checks_not_invoked(self):
+
+ class BadSkipChecks(self.parent_test):
+
+ @classmethod
+ def skip_checks(cls):
+ pass
+
+ bad_class = BadSkipChecks()
+ with testtools.ExpectedException(
+ RuntimeError,
+ value_re='^.* ' + BadSkipChecks.__name__):
+ bad_class.setUpClass()
+
+ def test_super_setup_credentials_not_invoked(self):
+
+ class BadSetupCredentials(self.parent_test):
+
+ @classmethod
+ def setup_credentials(cls):
+ pass
+
+ bad_class = BadSetupCredentials()
+ with testtools.ExpectedException(
+ RuntimeError,
+ value_re='^.* ' + BadSetupCredentials.__name__):
+ bad_class.setUpClass()
+
+ def test_grandparent_skip_checks_not_invoked(self):
+
+ class BadSkipChecks(self.parent_test):
+
+ @classmethod
+ def skip_checks(cls):
+ pass
+
+ class SonOfBadSkipChecks(BadSkipChecks):
+ pass
+
+ bad_class = SonOfBadSkipChecks()
+ with testtools.ExpectedException(
+ RuntimeError,
+ value_re='^.* ' + SonOfBadSkipChecks.__name__):
+ bad_class.setUpClass()
+
+ @mock.patch('tempest.common.credentials_factory.is_admin_available',
+ autospec=True, return_value=True)
+ def test_skip_checks_admin(self, mock_iaa):
+ identity_version = 'identity_version'
+
+ class NeedAdmin(self.parent_test):
+ credentials = ['admin']
+
+ @classmethod
+ def get_identity_version(cls):
+ return identity_version
+
+ NeedAdmin().skip_checks()
+ mock_iaa.assert_called_once_with('identity_version')
+
+ @mock.patch('tempest.common.credentials_factory.is_admin_available',
+ autospec=True, return_value=False)
+ def test_skip_checks_admin_not_available(self, mock_iaa):
+ identity_version = 'identity_version'
+
+ class NeedAdmin(self.parent_test):
+ credentials = ['admin']
+
+ @classmethod
+ def get_identity_version(cls):
+ return identity_version
+
+ with testtools.ExpectedException(testtools.testcase.TestSkipped):
+ NeedAdmin().skip_checks()
+ mock_iaa.assert_called_once_with('identity_version')
+
+ def test_skip_checks_identity_v2_not_available(self):
+ cfg.CONF.set_default('api_v2', False, 'identity-feature-enabled')
+
+ class NeedV2(self.parent_test):
+ identity_version = 'v2'
+
+ with testtools.ExpectedException(testtools.testcase.TestSkipped):
+ NeedV2().skip_checks()
+
+ def test_skip_checks_identity_v3_not_available(self):
+ cfg.CONF.set_default('api_v3', False, 'identity-feature-enabled')
+
+ class NeedV3(self.parent_test):
+ identity_version = 'v3'
+
+ with testtools.ExpectedException(testtools.testcase.TestSkipped):
+ NeedV3().skip_checks()
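
(Aside, not part of the patch: a minimal sketch of the class-level cleanup pattern exercised by TestTempestBaseTestClass above; `create_server` and `delete_server` are hypothetical helpers standing in for real API calls.)

    from tempest import test


    def create_server():  # hypothetical helper
        return {'id': 'fake-id'}


    def delete_server(server_id):  # hypothetical cleanup helper
        pass


    class MyApiTest(test.BaseTestCase):

        @classmethod
        def resource_setup(cls):
            super(MyApiTest, cls).resource_setup()
            cls.server = create_server()
            # Class resource cleanups run after all tests in the class, in
            # reverse registration order, and every scheduled cleanup is
            # invoked even if an earlier one raises
            cls.addClassResourceCleanup(delete_server, cls.server['id'])

        def test_something(self):
            self.assertEqual('fake-id', self.server['id'])
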
diff --git a/tox.ini b/tox.ini
index 24e3b5d..7bdc580 100644
--- a/tox.ini
+++ b/tox.ini
@@ -17,6 +17,7 @@
setenv =
VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/tests
+ OS_LOG_CAPTURE=1
PYTHONWARNINGS=default::DeprecationWarning
BRANCH_NAME=master
CLIENT_NAME=tempest