Merge "Fix the removal of config.skip_(unless|if)_config decorators"
diff --git a/releasenotes/notes/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml b/releasenotes/notes/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml
index aa3a78e..389b29f 100644
--- a/releasenotes/notes/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml
+++ b/releasenotes/notes/14.0.0-remo-stress-tests-81052b211ad95d2e.yaml
@@ -1,4 +1,13 @@
---
+prelude: >
+ This release marks the end of Liberty release support in Tempest
upgrade:
- The Stress tests framework and all the stress tests have been removed.
+other:
+ - |
+ OpenStack releases supported at this time are **Mitaka** and **Newton**.
+ The release under current development as of this tag is Ocata, meaning that
+ every Tempest commit is also tested against master during the Ocata cycle.
+ However, this does not necessarily mean that using Tempest as of this tag
+ will work against an Ocata (or future releases) cloud.
diff --git a/releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml b/releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
index 8665b8b..104bf27 100644
--- a/releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
+++ b/releasenotes/notes/15.0.0-remove-deprecated-compute-validation-config-options-e3d1b89ce074d71c.yaml
@@ -1,4 +1,6 @@
---
+prelude: >
+ This release marks the start of Ocata release support in Tempest
upgrade:
- |
Below deprecated config options from compute group have been removed.
@@ -11,4 +13,13 @@
- ``compute.ping_size`` (available as ``validation.ping_size``)
- ``compute.ping_count`` (available as ``validation.ping_count``)
- ``compute.floating_ip_range`` (available as ``validation.floating_ip_range``)
+other:
+ - |
+ OpenStack releases supported at this time are **Mitaka**, **Newton**,
+ and **Ocata**.
+ The release under current development as of this tag is Pike,
+ meaning that every Tempest commit is also tested against master during
+ the Pike cycle. However, this does not necessarily mean that using
+ Tempest as of this tag will work against a Pike (or future releases)
+ cloud.
diff --git a/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml b/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml
deleted file mode 100644
index 5555949..0000000
--- a/releasenotes/notes/15.0.0-start-of-pike-support-4925678d477b0745.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-prelude: >
- This release is marking the start of Ocata release support in Tempest
-other:
- - |
- OpenStack releases supported at this time are **Mitaka**, **Newton**,
- and **Ocata**.
-
- The release under current development as of this tag is Pike,
- meaning that every Tempest commit is also tested against master during
- the Pike cycle. However, this does not necessarily mean that using
- Tempest as of this tag will work against a Pike (or future releases)
- cloud.
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 04b0c2d..a9daba8 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.api.compute import base
-from tempest.common.utils import data_utils
from tempest.lib import decorators
from tempest import test
@@ -47,51 +46,37 @@
def test_flavor_access_list_with_private_flavor(self):
# Test to make sure that list flavor access on a newly created
# private flavor will return an empty access list
- flavor_name = data_utils.rand_name(self.flavor_name_prefix)
- new_flavor_id = data_utils.rand_int_id(start=1000)
- new_flavor = self.admin_flavors_client.create_flavor(
- name=flavor_name,
- ram=self.ram, vcpus=self.vcpus,
- disk=self.disk,
- id=new_flavor_id,
- is_public='False')['flavor']
- self.addCleanup(self.admin_flavors_client.delete_flavor,
- new_flavor['id'])
+ flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
+ disk=self.disk, is_public='False')
+
flavor_access = (self.admin_flavors_client.list_flavor_access(
- new_flavor_id)['flavor_access'])
+ flavor['id'])['flavor_access'])
self.assertEqual(len(flavor_access), 0, str(flavor_access))
@decorators.idempotent_id('59e622f6-bdf6-45e3-8ba8-fedad905a6b4')
def test_flavor_access_add_remove(self):
# Test to add and remove flavor access to a given tenant.
- flavor_name = data_utils.rand_name(self.flavor_name_prefix)
- new_flavor_id = data_utils.rand_int_id(start=1000)
- new_flavor = self.admin_flavors_client.create_flavor(
- name=flavor_name,
- ram=self.ram, vcpus=self.vcpus,
- disk=self.disk,
- id=new_flavor_id,
- is_public='False')['flavor']
- self.addCleanup(self.admin_flavors_client.delete_flavor,
- new_flavor['id'])
+ flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
+ disk=self.disk, is_public='False')
+
# Add flavor access to a tenant.
resp_body = {
"tenant_id": str(self.tenant_id),
- "flavor_id": str(new_flavor['id']),
+ "flavor_id": str(flavor['id']),
}
add_body = (self.admin_flavors_client.add_flavor_access(
- new_flavor['id'], self.tenant_id)['flavor_access'])
+ flavor['id'], self.tenant_id)['flavor_access'])
self.assertIn(resp_body, add_body)
# The flavor is present in list.
flavors = self.flavors_client.list_flavors(detail=True)['flavors']
- self.assertIn(new_flavor['id'], map(lambda x: x['id'], flavors))
+ self.assertIn(flavor['id'], map(lambda x: x['id'], flavors))
# Remove flavor access from a tenant.
remove_body = (self.admin_flavors_client.remove_flavor_access(
- new_flavor['id'], self.tenant_id)['flavor_access'])
+ flavor['id'], self.tenant_id)['flavor_access'])
self.assertNotIn(resp_body, remove_body)
# The flavor is not present in list.
flavors = self.flavors_client.list_flavors(detail=True)['flavors']
- self.assertNotIn(new_flavor['id'], map(lambda x: x['id'], flavors))
+ self.assertNotIn(flavor['id'], map(lambda x: x['id'], flavors))
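
For context on the refactor above: the per-test create/cleanup boilerplate now
lives behind the base class's create_flavor helper. A sketch of what such a
helper plausibly does (illustrative only, not the actual tempest
implementation in tempest/api/compute/base.py):

    # Illustrative sketch. Assumes the compute test-class context
    # (admin_flavors_client, flavor_name_prefix) and
    # `from tempest.common.utils import data_utils`, as in the code removed
    # above.
    def create_flavor(self, ram, vcpus, disk, is_public='True', **kwargs):
        flavor_name = data_utils.rand_name(self.flavor_name_prefix)
        flavor_id = data_utils.rand_int_id(start=1000)
        flavor = self.admin_flavors_client.create_flavor(
            name=flavor_name, ram=ram, vcpus=vcpus, disk=disk,
            id=flavor_id, is_public=is_public, **kwargs)['flavor']
        # Register cleanup once, so each test body only states what the
        # assertion actually needs.
        self.addCleanup(self.admin_flavors_client.delete_flavor,
                        flavor['id'])
        return flavor
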
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index 9fe1f74..33d5d73 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -14,7 +14,6 @@
# under the License.
from tempest.api.compute import base
-from tempest.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
@@ -49,108 +48,69 @@
@decorators.idempotent_id('0621c53e-d45d-40e7-951d-43e5e257b272')
def test_flavor_access_list_with_public_flavor(self):
# Test to list flavor access with exceptions by querying public flavor
- flavor_name = data_utils.rand_name(self.flavor_name_prefix)
- new_flavor_id = data_utils.rand_int_id(start=1000)
- new_flavor = self.admin_flavors_client.create_flavor(
- name=flavor_name,
- ram=self.ram, vcpus=self.vcpus,
- disk=self.disk,
- id=new_flavor_id,
- is_public='True')['flavor']
- self.addCleanup(self.admin_flavors_client.delete_flavor,
- new_flavor['id'])
+ flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
+ disk=self.disk, is_public='True')
self.assertRaises(lib_exc.NotFound,
self.admin_flavors_client.list_flavor_access,
- new_flavor_id)
+ flavor['id'])
@test.attr(type=['negative'])
@decorators.idempotent_id('41eaaade-6d37-4f28-9c74-f21b46ca67bd')
def test_flavor_non_admin_add(self):
# Test to add flavor access as a user without admin privileges.
- flavor_name = data_utils.rand_name(self.flavor_name_prefix)
- new_flavor_id = data_utils.rand_int_id(start=1000)
- new_flavor = self.admin_flavors_client.create_flavor(
- name=flavor_name,
- ram=self.ram, vcpus=self.vcpus,
- disk=self.disk,
- id=new_flavor_id,
- is_public='False')['flavor']
- self.addCleanup(self.admin_flavors_client.delete_flavor,
- new_flavor['id'])
+ flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
+ disk=self.disk, is_public='False')
self.assertRaises(lib_exc.Forbidden,
self.flavors_client.add_flavor_access,
- new_flavor['id'],
+ flavor['id'],
self.tenant_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('073e79a6-c311-4525-82dc-6083d919cb3a')
def test_flavor_non_admin_remove(self):
# Test to remove flavor access as a user without admin privileges.
- flavor_name = data_utils.rand_name(self.flavor_name_prefix)
- new_flavor_id = data_utils.rand_int_id(start=1000)
- new_flavor = self.admin_flavors_client.create_flavor(
- name=flavor_name,
- ram=self.ram, vcpus=self.vcpus,
- disk=self.disk,
- id=new_flavor_id,
- is_public='False')['flavor']
- self.addCleanup(self.admin_flavors_client.delete_flavor,
- new_flavor['id'])
+ flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
+ disk=self.disk, is_public='False')
+
# Add flavor access to a tenant.
- self.admin_flavors_client.add_flavor_access(new_flavor['id'],
+ self.admin_flavors_client.add_flavor_access(flavor['id'],
self.tenant_id)
self.addCleanup(self.admin_flavors_client.remove_flavor_access,
- new_flavor['id'], self.tenant_id)
+ flavor['id'], self.tenant_id)
self.assertRaises(lib_exc.Forbidden,
self.flavors_client.remove_flavor_access,
- new_flavor['id'],
+ flavor['id'],
self.tenant_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('f3592cc0-0306-483c-b210-9a7b5346eddc')
def test_add_flavor_access_duplicate(self):
# Create a new flavor.
- flavor_name = data_utils.rand_name(self.flavor_name_prefix)
- new_flavor_id = data_utils.rand_int_id(start=1000)
- new_flavor = self.admin_flavors_client.create_flavor(
- name=flavor_name,
- ram=self.ram, vcpus=self.vcpus,
- disk=self.disk,
- id=new_flavor_id,
- is_public='False')['flavor']
- self.addCleanup(self.admin_flavors_client.delete_flavor,
- new_flavor['id'])
+ flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
+ disk=self.disk, is_public='False')
# Add flavor access to a tenant.
- self.admin_flavors_client.add_flavor_access(new_flavor['id'],
+ self.admin_flavors_client.add_flavor_access(flavor['id'],
self.tenant_id)
self.addCleanup(self.admin_flavors_client.remove_flavor_access,
- new_flavor['id'], self.tenant_id)
+ flavor['id'], self.tenant_id)
# An exception should be raised when adding flavor access to the same
# tenant
self.assertRaises(lib_exc.Conflict,
self.admin_flavors_client.add_flavor_access,
- new_flavor['id'],
+ flavor['id'],
self.tenant_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('1f710927-3bc7-4381-9f82-0ca6e42644b7')
def test_remove_flavor_access_not_found(self):
# Create a new flavor.
- flavor_name = data_utils.rand_name(self.flavor_name_prefix)
- new_flavor_id = data_utils.rand_int_id(start=1000)
- new_flavor = self.admin_flavors_client.create_flavor(
- name=flavor_name,
- ram=self.ram, vcpus=self.vcpus,
- disk=self.disk,
- id=new_flavor_id,
- is_public='False')['flavor']
- self.addCleanup(self.admin_flavors_client.delete_flavor,
- new_flavor['id'])
+ flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
+ disk=self.disk, is_public='False')
# An exception should be raised when flavor access is not found
self.assertRaises(lib_exc.NotFound,
self.admin_flavors_client.remove_flavor_access,
- new_flavor['id'],
+ flavor['id'],
self.os_alt.servers_client.tenant_id)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 706b859..55cc293 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -445,8 +445,8 @@
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
- self.servers_client.attach_volume(
- server['id'], **attach_kwargs)
+ attachment = self.servers_client.attach_volume(
+ server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and wait for it to be available. This
# is so we don't error out when trying to delete the volume during
# teardown.
@@ -459,6 +459,7 @@
server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
+ return attachment
class BaseV2ComputeAdminTest(BaseV2ComputeTest):
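
With attach_volume now returning the volumeAttachment body, callers can
consume the attachment directly instead of keeping private helpers, as the
test_attach_volume.py changes below do. A minimal usage sketch (hypothetical
test method, assuming only the base-class helpers shown above):

    # Hypothetical illustration of the new return value.
    def test_attach_volume_example(self):
        server = self.create_test_server(wait_until='ACTIVE')
        volume = self.create_volume()
        attachment = self.attach_volume(server, volume, device='/dev/vdb')
        # The returned body carries the ids needed for later assertions
        # or for an explicit detach within the test.
        self.assertEqual(volume['id'], attachment['volumeId'])
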
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 209ab38..75ba15c 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -58,10 +58,8 @@
cls.password = data_utils.rand_password()
# Server for positive tests
server = cls.create_test_server(adminPass=cls.password,
- wait_until='BUILD')
+ wait_until='ACTIVE')
cls.server_id = server['id']
- waiters.wait_for_server_status(cls.servers_client, cls.server_id,
- 'ACTIVE')
@classmethod
def resource_cleanup(cls):
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 5304944..73c7614 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -22,7 +22,6 @@
from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -61,38 +60,14 @@
server['id'])['addresses']
return server
- def _detach_volume(self, server_id, volume_id):
- try:
- self.servers_client.detach_volume(server_id, volume_id)
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume_id, 'available')
- except lib_exc.NotFound:
- LOG.warning("Unable to detach volume %s from server %s "
- "possibly it was already detached", volume_id,
- server_id)
-
- def _attach_volume(self, server_id, volume_id, device=None):
- # Attach the volume to the server
- kwargs = {'volumeId': volume_id}
- if device:
- kwargs.update({'device': '/dev/%s' % device})
- attachment = self.servers_client.attach_volume(
- server_id, **kwargs)['volumeAttachment']
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume_id, 'in-use')
- self.addCleanup(self._detach_volume, server_id,
- volume_id)
-
- return attachment
-
@decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
def test_attach_detach_volume(self):
# Stop and Start a server with an attached volume, ensuring that
# the volume remains attached.
server = self._create_server()
volume = self.create_volume()
- attachment = self._attach_volume(server['id'], volume['id'],
- device=self.device)
+ attachment = self.attach_volume(server, volume,
+ device=('/dev/%s' % self.device))
self.servers_client.stop_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
@@ -115,7 +90,10 @@
device_name_to_match = '\n' + self.device + ' '
self.assertIn(device_name_to_match, disks)
- self._detach_volume(server['id'], attachment['volumeId'])
+ self.servers_client.detach_volume(server['id'], attachment['volumeId'])
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, attachment['volumeId'], 'available')
+
self.servers_client.stop_server(server['id'])
waiters.wait_for_server_status(self.servers_client, server['id'],
'SHUTOFF')
@@ -141,8 +119,8 @@
# List volume attachment of the server
server = self._create_server()
volume = self.create_volume()
- attachment = self._attach_volume(server['id'], volume['id'],
- device=self.device)
+ attachment = self.attach_volume(server, volume,
+ device=('/dev/%s' % self.device))
body = self.servers_client.list_volume_attachments(
server['id'])['volumeAttachments']
self.assertEqual(1, len(body))
@@ -165,8 +143,8 @@
server = self._create_server()
volume_1st = self.create_volume()
volume_2nd = self.create_volume()
- attachment_1st = self._attach_volume(server['id'], volume_1st['id'])
- attachment_2nd = self._attach_volume(server['id'], volume_2nd['id'])
+ attachment_1st = self.attach_volume(server, volume_1st)
+ attachment_2nd = self.attach_volume(server, volume_2nd)
body = self.servers_client.list_volume_attachments(
server['id'])['volumeAttachments']
@@ -253,8 +231,8 @@
volume = self.create_volume()
num_vol = self._count_volumes(server)
self._shelve_server(server)
- attachment = self._attach_volume(server['id'], volume['id'],
- device=self.device)
+ attachment = self.attach_volume(server, volume,
+ device=('/dev/%s' % self.device))
# Unshelve the instance and check that attached volume exists
self._unshelve_server_and_check_volumes(server, num_vol + 1)
@@ -279,9 +257,12 @@
volume = self.create_volume()
num_vol = self._count_volumes(server)
self._shelve_server(server)
- self._attach_volume(server['id'], volume['id'], device=self.device)
- # Detach the volume
- self._detach_volume(server['id'], volume['id'])
+
+ # Attach and then detach the volume
+ self.attach_volume(server, volume, device=('/dev/%s' % self.device))
+ self.servers_client.detach_volume(server['id'], volume['id'])
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
# Unshelve the instance and check that we have the expected number of
# volume(s)
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 445d928..b7b6596 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -306,3 +306,66 @@
roles_ids = [assignment['role']['id']
for assignment in role_assignments]
self.assertIn(self.roles[0]['id'], roles_ids)
+
+ @decorators.idempotent_id('d92a41d2-5501-497a-84bb-6e294330e8f8')
+ def test_domain_roles_create_delete(self):
+ domain_role = self.roles_client.create_role(
+ name=data_utils.rand_name('domain_role'),
+ domain_id=self.domain['id'])['role']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.roles_client.delete_role,
+ domain_role['id'])
+
+ domain_roles = self.roles_client.list_roles(
+ domain_id=self.domain['id'])['roles']
+ self.assertEqual(1, len(domain_roles))
+ self.assertIn(domain_role, domain_roles)
+
+ self.roles_client.delete_role(domain_role['id'])
+ domain_roles = self.roles_client.list_roles(
+ domain_id=self.domain['id'])['roles']
+ self.assertEmpty(domain_roles)
+
+ @decorators.idempotent_id('eb1e1c24-1bc4-4d47-9748-e127a1852c82')
+ def test_implied_domain_roles(self):
+ # Create two roles in the same domain
+ domain_role1 = self.setup_test_role(domain_id=self.domain['id'])
+ domain_role2 = self.setup_test_role(domain_id=self.domain['id'])
+
+ # Check if we can create an inference rule from roles in the same
+ # domain
+ self._create_implied_role(domain_role1['id'], domain_role2['id'])
+
+ # Create another role in a different domain
+ domain2 = self.setup_test_domain()
+ domain_role3 = self.setup_test_role(domain_id=domain2['id'])
+
+ # Check if we can create cross domain implied roles
+ self._create_implied_role(domain_role1['id'], domain_role3['id'])
+
+ # Finally, we also should be able to create an implied from a
+ # domain role to a global one
+ self._create_implied_role(domain_role1['id'], self.role['id'])
+
+ @decorators.idempotent_id('3859df7e-5b78-4e4d-b10e-214c8953842a')
+ def test_assignments_for_domain_roles(self):
+ domain_role = self.setup_test_role(domain_id=self.domain['id'])
+
+ # Create a grant using "domain_role"
+ self.roles_client.create_user_role_on_project(
+ self.project['id'], self.user_body['id'], domain_role['id'])
+ self.addCleanup(
+ self.roles_client.delete_role_from_user_on_project,
+ self.project['id'], self.user_body['id'], domain_role['id'])
+
+ # NOTE(rodrigods): Regular roles would appear in the effective
+ # list of role assignments (meaning the role would be returned in
+ # a token) as a result from the grant above. This is not the case
+ # for domain roles, they should not appear in the effective role
+ # assignments list.
+ params = {'scope.project.id': self.project['id'],
+ 'user.id': self.user_body['id']}
+ role_assignments = self.role_assignments.list_role_assignments(
+ effective=True, **params)['role_assignments']
+ self.assertEmpty(role_assignments)
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 3bbe47a..80e7936 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -75,10 +75,13 @@
self.addCleanup(self.users_client.delete_user, user['id'])
return user
- def setup_test_role(self):
+ def setup_test_role(self, domain_id=None):
"""Set up a test role."""
- role = self.roles_client.create_role(
- name=data_utils.rand_name('test_role'))['role']
+ params = {'name': data_utils.rand_name('test_role')}
+ if domain_id:
+ params['domain_id'] = domain_id
+
+ role = self.roles_client.create_role(**params)['role']
# Delete the role at the end of the test
self.addCleanup(self.roles_client.delete_role, role['id'])
return role
diff --git a/tempest/api/volume/admin/v2/test_snapshot_manage.py b/tempest/api/volume/admin/v2/test_snapshot_manage.py
index eed7dd1..e8bd477 100644
--- a/tempest/api/volume/admin/v2/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/v2/test_snapshot_manage.py
@@ -61,8 +61,8 @@
new_snapshot = self.admin_snapshot_manage_client.manage_snapshot(
volume_id=volume['id'],
ref={'source-name': snapshot_ref})['snapshot']
- self.addCleanup(self.delete_snapshot,
- self.admin_snapshots_client, new_snapshot['id'])
+ self.addCleanup(self.delete_snapshot, new_snapshot['id'],
+ self.admin_snapshots_client)
# Wait for the snapshot to be available after manage operation
waiters.wait_for_volume_resource_status(self.admin_snapshots_client,
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index f8c435f..fd10fb3 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -145,7 +145,7 @@
snapshot = cls.snapshots_client.create_snapshot(
volume_id=volume_id, **kwargs)['snapshot']
- cls.snapshots.append(snapshot)
+ cls.snapshots.append(snapshot['id'])
waiters.wait_for_volume_resource_status(cls.snapshots_client,
snapshot['id'], 'available')
return snapshot
@@ -171,11 +171,14 @@
client.delete_volume(volume_id)
client.wait_for_resource_deletion(volume_id)
- @staticmethod
- def delete_snapshot(client, snapshot_id):
+ def delete_snapshot(self, snapshot_id, snapshots_client=None):
"""Delete snapshot by the given client"""
- client.delete_snapshot(snapshot_id)
- client.wait_for_resource_deletion(snapshot_id)
+ if snapshots_client is None:
+ snapshots_client = self.snapshots_client
+ snapshots_client.delete_snapshot(snapshot_id)
+ snapshots_client.wait_for_resource_deletion(snapshot_id)
+ if snapshot_id in self.snapshots:
+ self.snapshots.remove(snapshot_id)
def attach_volume(self, server_id, volume_id):
"""Attach a volume to a server"""
@@ -207,12 +210,12 @@
def clear_snapshots(cls):
for snapshot in cls.snapshots:
test_utils.call_and_ignore_notfound_exc(
- cls.snapshots_client.delete_snapshot, snapshot['id'])
+ cls.snapshots_client.delete_snapshot, snapshot)
for snapshot in cls.snapshots:
test_utils.call_and_ignore_notfound_exc(
cls.snapshots_client.wait_for_resource_deletion,
- snapshot['id'])
+ snapshot)
def create_server(self, **kwargs):
name = kwargs.pop(
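
A consequence of the bookkeeping change above: cls.snapshots now holds bare
ids, so delete_snapshot can keep the class-level cleanup list in sync
regardless of which client performed the delete. A usage sketch (hypothetical
test, relying only on the helpers shown above):

    # Hypothetical illustration of the reworked helper pair.
    def test_snapshot_cleanup_example(self):
        volume = self.create_volume()
        snapshot = self.create_snapshot(volume['id'])  # id is recorded
        # Deleting through the helper also drops the id from
        # self.snapshots, so clear_snapshots() will not double-delete.
        self.delete_snapshot(snapshot['id'])
        # An alternate client can be passed explicitly, e.g.:
        # self.delete_snapshot(snapshot['id'], self.admin_snapshots_client)
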
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 19cd98f..5abda5e 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -36,12 +36,6 @@
cls.name_field = cls.special_fields['name_field']
cls.descrip_field = cls.special_fields['descrip_field']
- def cleanup_snapshot(self, snapshot):
- # Delete the snapshot
- self.snapshots_client.delete_snapshot(snapshot['id'])
- self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
- self.snapshots.remove(snapshot)
-
@decorators.idempotent_id('b467b54c-07a4-446d-a1cf-651dedcc3ff1')
@test.services('compute')
def test_snapshot_create_with_volume_in_use(self):
@@ -54,7 +48,7 @@
snapshot = self.create_snapshot(self.volume_origin['id'],
force=True)
# Delete the snapshot
- self.cleanup_snapshot(snapshot)
+ self.delete_snapshot(snapshot['id'])
@decorators.idempotent_id('8567b54c-4455-446d-a1cf-651ddeaa3ff2')
@test.services('compute')
@@ -70,9 +64,9 @@
# Delete the snapshots. Some snapshot implementations can take
# different paths according to order they are deleted.
- self.cleanup_snapshot(snapshot1)
- self.cleanup_snapshot(snapshot3)
- self.cleanup_snapshot(snapshot2)
+ self.delete_snapshot(snapshot1['id'])
+ self.delete_snapshot(snapshot3['id'])
+ self.delete_snapshot(snapshot2['id'])
@decorators.idempotent_id('5210a1de-85a0-11e6-bb21-641c676a5d61')
@test.services('compute')
@@ -91,9 +85,9 @@
# Delete the snapshots. Some snapshot implementations can take
# different paths according to order they are deleted.
- self.cleanup_snapshot(snapshot3)
- self.cleanup_snapshot(snapshot1)
- self.cleanup_snapshot(snapshot2)
+ self.delete_snapshot(snapshot3['id'])
+ self.delete_snapshot(snapshot1['id'])
+ self.delete_snapshot(snapshot2['id'])
@decorators.idempotent_id('2a8abbe4-d871-46db-b049-c41f5af8216e')
def test_snapshot_create_get_list_update_delete(self):
@@ -139,7 +133,7 @@
self.assertEqual(new_desc, updated_snapshot[self.descrip_field])
# Delete the snapshot
- self.cleanup_snapshot(snapshot)
+ self.delete_snapshot(snapshot['id'])
@decorators.idempotent_id('677863d1-3142-456d-b6ac-9924f667a7f4')
def test_volume_from_snapshot(self):
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index 0f868a9..8ee3055 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -294,7 +294,8 @@
outfile.write(json.dumps(url_parser.test_logs))
return
- for test_name, items in url_parser.test_logs.iteritems():
+ for test_name in url_parser.test_logs:
+ items = url_parser.test_logs[test_name]
sys.stdout.write('{0}\n'.format(test_name))
if not items:
sys.stdout.write('\n')
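
The replaced iteritems() exists only on Python 2 dicts, which is what the
hunk above fixes. Since dict.items() is available on both interpreters, an
equivalent and slightly terser spelling of the same loop would be:

    # Python 2/3-compatible alternative to the two-line form above;
    # the loop body is unchanged.
    for test_name, items in url_parser.test_logs.items():
        sys.stdout.write('{0}\n'.format(test_name))
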
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 9c33b71..b72dae9 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -43,16 +43,13 @@
return self.create_volume(name=vol_name, imageRef=img_uuid)
def _get_bdm(self, source_id, source_type, delete_on_termination=False):
- # NOTE(gfidente): the syntax for block_device_mapping is
- # dev_name=id:type:size:delete_on_terminate
- # where type needs to be "snap" if the server is booted
- # from a snapshot, size instead can be safely left empty
-
- bd_map = [{
- 'device_name': 'vda',
- '{}_id'.format(source_type): source_id,
- 'delete_on_termination': str(int(delete_on_termination))}]
- return {'block_device_mapping': bd_map}
+ bd_map_v2 = [{
+ 'uuid': source_id,
+ 'source_type': source_type,
+ 'destination_type': 'volume',
+ 'boot_index': 0,
+ 'delete_on_termination': delete_on_termination}]
+ return {'block_device_mapping_v2': bd_map_v2}
def _boot_instance_from_resource(self, source_id,
source_type,
@@ -236,14 +233,3 @@
# delete instance
self._delete_server(instance)
-
-
-class TestVolumeBootPatternV2(TestVolumeBootPattern):
- def _get_bdm(self, source_id, source_type, delete_on_termination=False):
- bd_map_v2 = [{
- 'uuid': source_id,
- 'source_type': source_type,
- 'destination_type': 'volume',
- 'boot_index': 0,
- 'delete_on_termination': delete_on_termination}]
- return {'block_device_mapping_v2': bd_map_v2}
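
For reviewers comparing the two block-device-mapping formats: the legacy v1
entry (removed above) keys the source by '<type>_id' and stringifies the
delete flag, while v2 (now the only variant) spells every field out. A
side-by-side sketch with illustrative placeholder values:

    # Legacy v1 payload, as the removed _get_bdm built it:
    bdm_v1 = {'block_device_mapping': [{
        'device_name': 'vda',
        'volume_id': '<source-uuid>',      # or 'snapshot_id' for snapshots
        'delete_on_termination': '0'}]}    # stringified 0/1

    # v2 payload, as _get_bdm builds it now:
    bdm_v2 = {'block_device_mapping_v2': [{
        'uuid': '<source-uuid>',
        'source_type': 'volume',           # or 'snapshot'
        'destination_type': 'volume',
        'boot_index': 0,
        'delete_on_termination': False}]}  # real boolean
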
diff --git a/tempest/tests/cmd/test_subunit_describe_calls.py b/tempest/tests/cmd/test_subunit_describe_calls.py
index 1c24c37..5f3d770 100644
--- a/tempest/tests/cmd/test_subunit_describe_calls.py
+++ b/tempest/tests/cmd/test_subunit_describe_calls.py
@@ -33,6 +33,16 @@
p.communicate()
self.assertEqual(0, p.returncode)
+ def test_return_code_no_output(self):
+ subunit_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ 'sample_streams/calls.subunit')
+ p = subprocess.Popen([
+ 'subunit-describe-calls', '-s', subunit_file],
+ stdin=subprocess.PIPE)
+ p.communicate()
+ self.assertEqual(0, p.returncode)
+
def test_parse(self):
subunit_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)),