Merge "Remove find_stack_traces.py"
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 7189312..aca1845 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -352,6 +352,10 @@
.. _3.12: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id12
+ * `3.13`_
+
+ .. _3.13: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id13
+
* `3.14`_
.. _3.14: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id14
diff --git a/doc/source/plugin.rst b/doc/source/plugin.rst
index 2afb1e5..6f6621d 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugin.rst
@@ -132,7 +132,7 @@
Plugin Structure
================
-While there are no hard and fast rules for the structure a plugin, there are
+While there are no hard and fast rules for the structure of a plugin, there are
basically no constraints on what the plugin looks like as long as the 2 steps
above are done. However, there are some recommended patterns to follow to make
it easy for people to contribute and work with your plugin. For example, if you
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 49af95a..fff2405 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -61,7 +61,7 @@
which is executed in that order. Cleanup of resources provisioned during
the resource_setup must be scheduled right after provisioning using
-the addClassResourceCleanp helper. The resource cleanups stacked this way
+the addClassResourceCleanup helper. The resource cleanups stacked this way
are executed in reverse order during tearDownClass, before the cleanup of
test credentials takes place. An example of a TestCase which defines all
of these would be::
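For reference, a minimal sketch of the class-level cleanup pattern described above; the base class, client, and resource names here are illustrative only:

    class WidgetsTest(base.BaseExampleTest):

        @classmethod
        def resource_setup(cls):
            super(WidgetsTest, cls).resource_setup()
            # Provision a shared resource and immediately schedule its
            # cleanup; stacked cleanups run in reverse order during
            # tearDownClass, before the test credentials are cleaned up.
            cls.widget = cls.widgets_client.create_widget(
                name='example-widget')['widget']
            cls.addClassResourceCleanup(cls.widgets_client.delete_widget,
                                        cls.widget['id'])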
diff --git a/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml b/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml
new file mode 100644
index 0000000..14458d6
--- /dev/null
+++ b/releasenotes/notes/add-update-api-to-group-types-client-09c06ccdf80d5003.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add the update group type API to the v3 ``group_types_client`` library;
+ the minimum microversion of this API is 3.11.
diff --git a/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml b/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml
new file mode 100644
index 0000000..dbb6c46
--- /dev/null
+++ b/releasenotes/notes/removal-deprecated-config-options-3db535b979fe3509.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ The following config options and feature flags were previously deprecated
+ for removal. They have now been removed, since all supported stable
+ branches can handle their absence.
+
+ * ``[identity-feature-enabled].forbid_global_implied_dsr``
diff --git a/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml b/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml
new file mode 100644
index 0000000..17866e5
--- /dev/null
+++ b/releasenotes/notes/removed-tox-ostestr-8997a93d199c44f3.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+ The tox ostestr job (normally invoked with ``tox -eostestr``) has been
+ removed. This was lightly used, and in the near future ostestr will be
+ removed from the tempest requirements file. If you were relying on this
+ functionality you can replicate it by using the venv-tempest tox job. For
+ example, simply running ``tox -evenv-tempest -- ostestr`` will do the same
+ thing the old ostestr job did.
diff --git a/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml b/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml
new file mode 100644
index 0000000..ddd1704
--- /dev/null
+++ b/releasenotes/notes/volume-backed-live-mig-5a38b496ba1ec093.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ A new boolean configuration option
+ ``[compute-feature-enabled]/volume_backed_live_migration`` has been added.
+ If enabled, tests which validate the behavior of Nova's *volume-backed live
+ migration* feature will be executed. The option defaults to ``False``.
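A test gated on this flag uses testtools.skipUnless, exactly as the test_live_migration.py hunk further below does; a minimal, self-contained sketch (the class here is illustrative, not part of Tempest):

    import testtools
    import unittest

    from tempest import config

    CONF = config.CONF


    class VolumeBackedLiveMigrationSketch(unittest.TestCase):

        @testtools.skipUnless(
            CONF.compute_feature_enabled.volume_backed_live_migration,
            'Volume-backed live migration not available')
        def test_volume_backed_live_migration(self):
            pass  # the real test exercises Nova's live migration API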
diff --git a/requirements.txt b/requirements.txt
index 2300214..cd74449 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,7 +12,7 @@
oslo.config>=5.1.0 # Apache-2.0
oslo.log>=3.30.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-oslo.utils>=3.31.0 # Apache-2.0
+oslo.utils>=3.33.0 # Apache-2.0
six>=1.10.0 # MIT
fixtures>=3.0.0 # Apache-2.0/BSD
PyYAML>=3.10 # MIT
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 411159b..dcd7b9b 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -132,7 +132,9 @@
def test_live_block_migration_paused(self):
self._test_live_migration(state='PAUSED')
- @decorators.skip_because(bug="1524898")
+ @testtools.skipUnless(CONF.compute_feature_enabled.
+ volume_backed_live_migration,
+ 'Volume-backed live migration not available')
@decorators.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
@utils.services('volume')
def test_volume_backed_live_migration(self):
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index eabb907..7a74869 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -41,3 +41,18 @@
self.assertRaises(lib_exc.BadRequest,
self.delete_volume, volume['id'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('aab919e2-d992-4cbb-a4ed-745c2475398c')
+ def test_attach_attached_volume_to_same_server(self):
+ # Test attaching the same volume to the same instance once
+ # it's already attached. The nova/cinder validation for this differs
+ # depending on whether or not cinder v3.27 is being used to attach
+ # the volume to the instance.
+ server = self.create_test_server(wait_until='ACTIVE')
+ volume = self.create_volume()
+
+ self.attach_volume(server, volume)
+
+ self.assertRaises(lib_exc.BadRequest,
+ self.attach_volume, server, volume)
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index 15b2008..ba19ff7 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -32,21 +32,18 @@
u_email = '%s@testmail.tm' % u_name
u_password = data_utils.rand_password()
for _ in range(2):
- cls.project = cls.projects_client.create_project(
+ project = cls.projects_client.create_project(
data_utils.rand_name('project'),
description=data_utils.rand_name('project-desc'))['project']
- cls.projects.append(cls.project['id'])
+ cls.addClassResourceCleanup(
+ cls.projects_client.delete_project, project['id'])
+ cls.projects.append(project['id'])
cls.user_body = cls.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.projects[0])['user']
-
- @classmethod
- def resource_cleanup(cls):
- cls.users_client.delete_user(cls.user_body['id'])
- for p in cls.projects:
- cls.projects_client.delete_project(p)
- super(CredentialsTestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(
+ cls.users_client.delete_user, cls.user_body['id'])
def _delete_credential(self, cred_id):
self.creds_client.delete_credential(cred_id)
diff --git a/tempest/api/identity/admin/v3/test_domain_configuration.py b/tempest/api/identity/admin/v3/test_domain_configuration.py
index f731697..c4e0622 100644
--- a/tempest/api/identity/admin/v3/test_domain_configuration.py
+++ b/tempest/api/identity/admin/v3/test_domain_configuration.py
@@ -37,18 +37,6 @@
super(DomainConfigurationTestJSON, cls).setup_clients()
cls.client = cls.domain_config_client
- @classmethod
- def resource_setup(cls):
- super(DomainConfigurationTestJSON, cls).resource_setup()
- cls.group = cls.groups_client.create_group(
- name=data_utils.rand_name('group'),
- description=data_utils.rand_name('group-desc'))['group']
-
- @classmethod
- def resource_cleanup(cls):
- cls.groups_client.delete_group(cls.group['id'])
- super(DomainConfigurationTestJSON, cls).resource_cleanup()
-
def _create_domain_and_config(self, config):
domain = self.setup_test_domain()
config = self.client.create_domain_config(domain['id'], **config)[
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index 49dbba1..eef93c2 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -15,6 +15,7 @@
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -28,11 +29,12 @@
@classmethod
def resource_setup(cls):
super(EndPointGroupsTest, cls).resource_setup()
- cls.service_ids = list()
cls.endpoint_groups = list()
# Create endpoint group so as to use it for LIST test
service_id = cls._create_service()
+ cls.addClassResourceCleanup(
+ cls.services_client.delete_service, service_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
@@ -42,18 +44,12 @@
name=name,
description=description,
filters=filters)['endpoint_group']
+ cls.addClassResourceCleanup(
+ cls.client.delete_endpoint_group, endpoint_group['id'])
cls.endpoint_groups.append(endpoint_group)
@classmethod
- def resource_cleanup(cls):
- for e in cls.endpoint_groups:
- cls.client.delete_endpoint_group(e['id'])
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndPointGroupsTest, cls).resource_cleanup()
-
- @classmethod
def _create_service(cls):
s_name = data_utils.rand_name('service')
s_type = data_utils.rand_name('type')
@@ -64,7 +60,6 @@
description=s_description))
service_id = service_data['service']['id']
- cls.service_ids.append(service_id)
return service_id
@decorators.idempotent_id('7c69e7a1-f865-402d-a2ea-44493017315a')
@@ -78,6 +73,9 @@
name=name,
description=description,
filters=filters)['endpoint_group']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_endpoint_group, endpoint_group['id'])
self.endpoint_groups.append(endpoint_group)
@@ -115,7 +113,6 @@
# Deleting the endpoint group created in this method
self.client.delete_endpoint_group(endpoint_group['id'])
- self.endpoint_groups.remove(endpoint_group)
# Checking whether endpoint group is deleted successfully
fetched_endpoints = \
@@ -136,10 +133,12 @@
name=name,
description=description,
filters=filters)['endpoint_group']
- self.endpoint_groups.append(endpoint_group)
+ self.addCleanup(self.client.delete_endpoint_group,
+ endpoint_group['id'])
# Creating new attr values to update endpoint group
service2_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service2_id)
name2 = data_utils.rand_name('service_group2')
description2 = data_utils.rand_name('description2')
filters = {'service_id': service2_id}
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 5d48f68..874aaa4 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -15,6 +15,7 @@
from tempest.api.identity import base
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -34,12 +35,18 @@
interfaces = ['public', 'internal']
cls.setup_endpoint_ids = list()
for i in range(2):
- cls._create_service()
+ service = cls._create_service()
+ cls.service_ids.append(service['id'])
+ cls.addClassResourceCleanup(
+ cls.services_client.delete_service, service['id'])
+
region = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = cls.client.create_endpoint(
service_id=cls.service_ids[i], interface=interfaces[i],
url=url, region=region, enabled=True)['endpoint']
+ cls.addClassResourceCleanup(
+ cls.client.delete_endpoint, endpoint['id'])
cls.setup_endpoint_ids.append(endpoint['id'])
@classmethod
@@ -53,17 +60,7 @@
service_data = (
cls.services_client.create_service(name=s_name, type=s_type,
description=s_description))
- service = service_data['service']
- cls.service_ids.append(service['id'])
- return service
-
- @classmethod
- def resource_cleanup(cls):
- for e in cls.setup_endpoint_ids:
- cls.client.delete_endpoint(e)
- for s in cls.service_ids:
- cls.services_client.delete_service(s)
- super(EndPointsTestJSON, cls).resource_cleanup()
+ return service_data['service']
@decorators.idempotent_id('c19ecf90-240e-4e23-9966-21cee3f6a618')
def test_list_endpoints(self):
@@ -114,8 +111,8 @@
interface=interface,
url=url, region=region,
enabled=True)['endpoint']
-
- self.setup_endpoint_ids.append(endpoint['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_endpoint, endpoint['id'])
# Asserting Create Endpoint response body
self.assertEqual(region, endpoint['region'])
self.assertEqual(url, endpoint['url'])
@@ -137,7 +134,6 @@
# Deleting the endpoint created in this method
self.client.delete_endpoint(endpoint['id'])
- self.setup_endpoint_ids.remove(endpoint['id'])
# Checking whether endpoint is deleted successfully
fetched_endpoints = self.client.list_endpoints()['endpoints']
@@ -147,8 +143,20 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('37e8f15e-ee7c-4657-a1e7-f6b61e375eff')
def test_update_endpoint(self):
- # Creating an endpoint so as to check update endpoint
- # with new values
+ # NOTE(zhufl) Service2 should be created before endpoint_for_update
+ # is created, because Service2 must be deleted after
+ # endpoint_for_update is deleted, otherwise we will get a 404 error
+ # when deleting endpoint_for_update if endpoint's service is deleted.
+
+ # Creating service for updating endpoint with new service ID
+ s_name = data_utils.rand_name('service')
+ s_type = data_utils.rand_name('type')
+ s_description = data_utils.rand_name('description')
+ service2 = self._create_service(s_name=s_name, s_type=s_type,
+ s_description=s_description)
+ self.addCleanup(self.services_client.delete_service, service2['id'])
+
+ # Creating an endpoint so as to check update endpoint with new values
region1 = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
@@ -158,12 +166,7 @@
url=url1, region=region1,
enabled=True)['endpoint'])
self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
- # Creating service so as update endpoint with new service ID
- s_name = data_utils.rand_name('service')
- s_type = data_utils.rand_name('type')
- s_description = data_utils.rand_name('description')
- service2 = self._create_service(s_name=s_name, s_type=s_type,
- s_description=s_description)
+
# Updating endpoint with new values
region2 = data_utils.rand_name('region')
url2 = data_utils.rand_url()
diff --git a/tempest/api/identity/admin/v3/test_inherits.py b/tempest/api/identity/admin/v3/test_inherits.py
index c0c79b9..68c0225 100644
--- a/tempest/api/identity/admin/v3/test_inherits.py
+++ b/tempest/api/identity/admin/v3/test_inherits.py
@@ -36,20 +36,19 @@
data_utils.rand_name('project-'),
description=data_utils.rand_name('project-desc-'),
domain_id=cls.domain['id'])['project']
+ cls.addClassResourceCleanup(cls.projects_client.delete_project,
+ cls.project['id'])
cls.group = cls.groups_client.create_group(
name=data_utils.rand_name('group-'), project_id=cls.project['id'],
domain_id=cls.domain['id'])['group']
+ cls.addClassResourceCleanup(cls.groups_client.delete_group,
+ cls.group['id'])
cls.user = cls.users_client.create_user(
name=u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.project['id'],
domain_id=cls.domain['id'])['user']
-
- @classmethod
- def resource_cleanup(cls):
- cls.groups_client.delete_group(cls.group['id'])
- cls.users_client.delete_user(cls.user['id'])
- cls.projects_client.delete_project(cls.project['id'])
- super(InheritsV3TestJSON, cls).resource_cleanup()
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.user['id'])
def _list_assertions(self, body, fetched_role_ids, role_id):
self.assertEqual(len(body), 1)
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index 88cd8be..c69e4c8 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -47,21 +47,18 @@
cls.domain_enabled_user = cls.users_client.create_user(
name=u1_name, password=alt_password,
email=cls.alt_email, domain_id=cls.domain['id'])['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.domain_enabled_user['id'])
cls.users.append(cls.domain_enabled_user)
# Create default not enabled user
u2_name = data_utils.rand_name('test_user')
cls.non_domain_enabled_user = cls.users_client.create_user(
name=u2_name, password=alt_password,
email=cls.alt_email, enabled=False)['user']
+ cls.addClassResourceCleanup(cls.users_client.delete_user,
+ cls.non_domain_enabled_user['id'])
cls.users.append(cls.non_domain_enabled_user)
- @classmethod
- def resource_cleanup(cls):
- # Cleanup the users created during setup
- for user in cls.users:
- cls.users_client.delete_user(user['id'])
- super(UsersV3TestJSON, cls).resource_cleanup()
-
@decorators.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
def test_list_user_domains(self):
# List users with domain
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 7cd41e9..69cac33 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -338,14 +338,13 @@
# domain role to a global one
self._create_implied_role(domain_role1['id'], self.role['id'])
- if CONF.identity_feature_enabled.forbid_global_implied_dsr:
- # The contrary is not true: we can't create an inference rule
- # from a global role to a domain role
- self.assertRaises(
- lib_exc.Forbidden,
- self.roles_client.create_role_inference_rule,
- self.role['id'],
- domain_role1['id'])
+ # The contrary is not true: we can't create an inference rule
+ # from a global role to a domain role
+ self.assertRaises(
+ lib_exc.Forbidden,
+ self.roles_client.create_role_inference_rule,
+ self.role['id'],
+ domain_role1['id'])
@decorators.idempotent_id('3859df7e-5b78-4e4d-b10e-214c8953842a')
def test_assignments_for_domain_roles(self):
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index d7c85a2..3bbab11 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -44,14 +44,13 @@
for i in range(ord('a'), ord('f') + 1):
name = data_utils.rand_name(name='%s-' % six.int2byte(i))
cls.container_client.update_container(name)
+ cls.addClassResourceCleanup(base.delete_containers,
+ [name],
+ cls.container_client,
+ cls.object_client)
cls.containers.append(name)
cls.containers_count = len(cls.containers)
- @classmethod
- def resource_cleanup(cls):
- cls.delete_containers()
- super(AccountTest, cls).resource_cleanup()
-
@decorators.attr(type='smoke')
@decorators.idempotent_id('3499406a-ae53-4f8c-b43a-133d4dc6fe3f')
def test_list_containers(self):
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index 51b0a1d..75111b6 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -24,16 +24,6 @@
class ContainerTest(base.BaseObjectTest):
- @classmethod
- def resource_setup(cls):
- super(ContainerTest, cls).resource_setup()
- cls.containers = []
-
- @classmethod
- def resource_cleanup(cls):
- cls.delete_containers()
- super(ContainerTest, cls).resource_cleanup()
-
def assertContainer(self, container, count, byte, versioned):
resp, _ = self.container_client.list_container_metadata(container)
self.assertHeaders(resp, 'Container', 'HEAD')
@@ -52,7 +42,10 @@
# create container
vers_container_name = data_utils.rand_name(name='TestVersionContainer')
resp, _ = self.container_client.update_container(vers_container_name)
- self.containers.append(vers_container_name)
+ self.addCleanup(base.delete_containers,
+ [vers_container_name],
+ self.container_client,
+ self.object_client)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(vers_container_name, '0', '0', 'Missing Header')
@@ -61,7 +54,10 @@
resp, _ = self.container_client.update_container(
base_container_name,
**headers)
- self.containers.append(base_container_name)
+ self.addCleanup(base.delete_containers,
+ [base_container_name],
+ self.container_client,
+ self.object_client)
self.assertHeaders(resp, 'Container', 'PUT')
self.assertContainer(base_container_name, '0', '0',
vers_container_name)
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
new file mode 100644
index 0000000..45f4caa
--- /dev/null
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -0,0 +1,202 @@
+# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class BaseGroupSnapshotsTest(base.BaseVolumeAdminTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseGroupSnapshotsTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
+
+ def _create_group_snapshot(self, **kwargs):
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name(
+ self.__class__.__name__ + '-Group_Snapshot')
+
+ group_snapshot = self.group_snapshots_client.create_group_snapshot(
+ **kwargs)['group_snapshot']
+ group_snapshot['group_id'] = kwargs['group_id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self._delete_group_snapshot, group_snapshot)
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], 'available')
+ return group_snapshot
+
+ def _delete_group_snapshot(self, group_snapshot):
+ self.group_snapshots_client.delete_group_snapshot(group_snapshot['id'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for vol in vols:
+ for snap in snapshots:
+ if (vol['group_id'] == group_snapshot['group_id'] and
+ vol['id'] == snap['volume_id']):
+ self.snapshots_client.wait_for_resource_deletion(
+ snap['id'])
+ self.group_snapshots_client.wait_for_resource_deletion(
+ group_snapshot['id'])
+
+
+class GroupSnapshotsTest(BaseGroupSnapshotsTest):
+ _api_version = 3
+ min_microversion = '3.14'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
+ def test_group_snapshot_create_show_list_delete(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+
+ # Create group snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = self._create_group_snapshot(
+ group_id=grp['id'], name=group_snapshot_name)
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if vol['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+
+ # Get a given group snapshot
+ group_snapshot = self.group_snapshots_client.show_group_snapshot(
+ group_snapshot['id'])['group_snapshot']
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+
+ # Get all group snapshots with details, check some detail-specific
+ # elements, and look for the created group snapshot
+ group_snapshots = self.group_snapshots_client.list_group_snapshots(
+ detail=True)['group_snapshots']
+ for grp_snapshot in group_snapshots:
+ self.assertIn('created_at', grp_snapshot)
+ self.assertIn('group_id', grp_snapshot)
+ self.assertIn((group_snapshot['name'], group_snapshot['id']),
+ [(m['name'], m['id']) for m in group_snapshots])
+
+ # Delete group snapshot
+ self._delete_group_snapshot(group_snapshot)
+ group_snapshots = self.group_snapshots_client.list_group_snapshots()[
+ 'group_snapshots']
+ self.assertEmpty(group_snapshots)
+
+ @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
+ def test_create_group_from_group_snapshot(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create Group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ vol = self.create_volume(volume_type=volume_type['id'],
+ group_id=grp['id'])
+
+ # Create group_snapshot
+ group_snapshot_name = data_utils.rand_name('group_snapshot')
+ group_snapshot = self._create_group_snapshot(
+ group_id=grp['id'], name=group_snapshot_name)
+ self.assertEqual(group_snapshot_name, group_snapshot['name'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if vol['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Create Group from Group snapshot
+ grp_name2 = data_utils.rand_name('Group_from_snap')
+ grp2 = self.groups_client.create_group_from_source(
+ group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
+ self.addCleanup(self.delete_group, grp2['id'])
+ self.assertEqual(grp_name2, grp2['name'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp2['id']:
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, vol['id'], 'available')
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, grp2['id'], 'available')
+
+
+class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
+ _api_version = 3
+ min_microversion = '3.19'
+ max_microversion = 'latest'
+
+ @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
+ def test_reset_group_snapshot_status(self):
+ # Create volume type
+ volume_type = self.create_volume_type()
+
+ # Create group type
+ group_type = self.create_group_type()
+
+ # Create group
+ group = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
+
+ # Create volume
+ volume = self.create_volume(volume_type=volume_type['id'],
+ group_id=group['id'])
+
+ # Create group snapshot
+ group_snapshot = self._create_group_snapshot(group_id=group['id'])
+ snapshots = self.snapshots_client.list_snapshots(
+ detail=True)['snapshots']
+ for snap in snapshots:
+ if volume['id'] == snap['volume_id']:
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snap['id'], 'available')
+
+ # Reset group snapshot status
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.group_snapshots_client,
+ group_snapshot['id'], 'available')
+ self.addCleanup(
+ self.admin_group_snapshots_client.reset_group_snapshot_status,
+ group_snapshot['id'], 'available')
+ for status in ['creating', 'available', 'error']:
+ self.admin_group_snapshots_client.reset_group_snapshot_status(
+ group_snapshot['id'], status)
+ waiters.wait_for_volume_resource_status(
+ self.group_snapshots_client, group_snapshot['id'], status)
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index 0df5fbd..6723207 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -24,7 +24,7 @@
max_microversion = 'latest'
@decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
- def test_group_type_create_list_show(self):
+ def test_group_type_create_list_update_show(self):
# Create/list/show group type.
name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
description = data_utils.rand_name("group-type-description")
@@ -46,8 +46,19 @@
self.assertIsInstance(group_list, list)
self.assertNotEmpty(group_list)
+ update_params = {
+ 'name': data_utils.rand_name(
+ self.__class__.__name__ + '-updated-group-type'),
+ 'description': 'updated-group-type-desc'
+ }
+ updated_group_type = self.admin_group_types_client.update_group_type(
+ body['id'], **update_params)['group_type']
+ for key, expected_val in update_params.items():
+ self.assertEqual(expected_val, updated_group_type[key])
+
fetched_group_type = self.admin_group_types_client.show_group_type(
body['id'])['group_type']
+ params.update(update_params) # Add updated params to original params.
for key in params.keys():
self.assertEqual(params[key], fetched_group_type[key],
'%s of the fetched group_type is different '
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index 6b53d85..2f6eb6b 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -17,54 +17,14 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
-class BaseGroupsTest(base.BaseVolumeAdminTest):
-
- def _delete_group(self, grp_id, delete_volumes=True):
- self.groups_client.delete_group(grp_id, delete_volumes)
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp_id:
- self.volumes_client.wait_for_resource_deletion(vol['id'])
- self.groups_client.wait_for_resource_deletion(grp_id)
-
- def _delete_group_snapshot(self, group_snapshot_id, grp_id):
- self.group_snapshots_client.delete_group_snapshot(group_snapshot_id)
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for vol in vols:
- for snap in snapshots:
- if (vol['group_id'] == grp_id and
- vol['id'] == snap['volume_id']):
- self.snapshots_client.wait_for_resource_deletion(
- snap['id'])
- self.group_snapshots_client.wait_for_resource_deletion(
- group_snapshot_id)
-
- def _create_group(self, group_type, volume_type, grp_name=None):
- if not grp_name:
- grp_name = data_utils.rand_name('Group')
- grp = self.groups_client.create_group(
- group_type=group_type['id'],
- volume_types=[volume_type['id']],
- name=grp_name)['group']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_group, grp['id'])
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp['id'], 'available')
- self.assertEqual(grp_name, grp['name'])
- return grp
-
-
-class GroupsTest(BaseGroupsTest):
+class GroupsTest(base.BaseVolumeAdminTest):
_api_version = 3
- min_microversion = '3.14'
+ min_microversion = '3.13'
max_microversion = 'latest'
@decorators.idempotent_id('4b111d28-b73d-4908-9bd2-03dc2992e4d4')
@@ -77,13 +37,15 @@
# Create group
grp1_name = data_utils.rand_name('Group1')
- grp1 = self._create_group(group_type, volume_type,
- grp_name=grp1_name)
+ grp1 = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']],
+ name=grp1_name)
grp1_id = grp1['id']
grp2_name = data_utils.rand_name('Group2')
- grp2 = self._create_group(group_type, volume_type,
- grp_name=grp2_name)
+ grp2 = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']],
+ name=grp2_name)
grp2_id = grp2['id']
# Create volume
@@ -125,143 +87,12 @@
# Delete group
# grp1 has a volume so delete_volumes flag is set to True by default
- self._delete_group(grp1_id)
+ self.delete_group(grp1_id)
# grp2 is empty so delete_volumes flag can be set to False
- self._delete_group(grp2_id, delete_volumes=False)
+ self.delete_group(grp2_id, delete_volumes=False)
grps = self.groups_client.list_groups(detail=True)['groups']
self.assertEmpty(grps)
- @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
- def test_group_snapshot_create_show_list_delete(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- vol = self.create_volume(volume_type=volume_type['id'],
- group_id=grp['id'])
-
- # Create group snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (
- self.group_snapshots_client.create_group_snapshot(
- group_id=grp['id'],
- name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self._delete_group_snapshot,
- group_snapshot['id'], grp['id'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if vol['id'] == snap['volume_id']:
- waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client,
- group_snapshot['id'], 'available')
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
-
- # Get a given group snapshot
- group_snapshot = self.group_snapshots_client.show_group_snapshot(
- group_snapshot['id'])['group_snapshot']
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
-
- # Get all group snapshots with details, check some detail-specific
- # elements, and look for the created group snapshot
- group_snapshots = (self.group_snapshots_client.list_group_snapshots(
- detail=True)['group_snapshots'])
- for grp_snapshot in group_snapshots:
- self.assertIn('created_at', grp_snapshot)
- self.assertIn('group_id', grp_snapshot)
- self.assertIn((group_snapshot['name'], group_snapshot['id']),
- [(m['name'], m['id']) for m in group_snapshots])
-
- # Delete group snapshot
- self._delete_group_snapshot(group_snapshot['id'], grp['id'])
- group_snapshots = (self.group_snapshots_client.list_group_snapshots()
- ['group_snapshots'])
- self.assertEmpty(group_snapshots)
-
- @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
- def test_create_group_from_group_snapshot(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create Group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- vol = self.create_volume(volume_type=volume_type['id'],
- group_id=grp['id'])
-
- # Create group_snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (
- self.group_snapshots_client.create_group_snapshot(
- group_id=grp['id'],
- name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(self._delete_group_snapshot,
- group_snapshot['id'], grp['id'])
- self.assertEqual(group_snapshot_name, group_snapshot['name'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if vol['id'] == snap['volume_id']:
- waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], 'available')
-
- # Create Group from Group snapshot
- grp_name2 = data_utils.rand_name('Group_from_snap')
- grp2 = self.groups_client.create_group_from_source(
- group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
- self.addCleanup(self._delete_group, grp2['id'])
- self.assertEqual(grp_name2, grp2['name'])
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp2['id']:
- waiters.wait_for_volume_resource_status(
- self.volumes_client, vol['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp2['id'], 'available')
-
- @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
- def test_create_group_from_group(self):
- # Create volume type
- volume_type = self.create_volume_type()
-
- # Create group type
- group_type = self.create_group_type()
-
- # Create Group
- grp = self._create_group(group_type, volume_type)
-
- # Create volume
- self.create_volume(volume_type=volume_type['id'], group_id=grp['id'])
-
- # Create Group from Group
- grp_name2 = data_utils.rand_name('Group_from_grp')
- grp2 = self.groups_client.create_group_from_source(
- source_group_id=grp['id'], name=grp_name2)['group']
- self.addCleanup(self._delete_group, grp2['id'])
- self.assertEqual(grp_name2, grp2['name'])
- vols = self.volumes_client.list_volumes(detail=True)['volumes']
- for vol in vols:
- if vol['group_id'] == grp2['id']:
- waiters.wait_for_volume_resource_status(
- self.volumes_client, vol['id'], 'available')
- waiters.wait_for_volume_resource_status(
- self.groups_client, grp2['id'], 'available')
-
@decorators.idempotent_id('4a8a6fd2-8b3b-4641-8f54-6a6f99320006')
def test_group_update(self):
# Create volume type
@@ -271,7 +102,8 @@
group_type = self.create_group_type()
# Create Group
- grp = self._create_group(group_type, volume_type)
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Create volumes
grp_vols = []
@@ -317,56 +149,42 @@
self.assertEqual(2, len(grp_vols))
-class GroupsV319Test(BaseGroupsTest):
+class GroupsV314Test(base.BaseVolumeAdminTest):
_api_version = 3
- min_microversion = '3.19'
+ min_microversion = '3.14'
max_microversion = 'latest'
- @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
- def test_reset_group_snapshot_status(self):
+ @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
+ def test_create_group_from_group(self):
# Create volume type
volume_type = self.create_volume_type()
# Create group type
group_type = self.create_group_type()
- # Create group
- group = self._create_group(group_type, volume_type)
+ # Create Group
+ grp = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Create volume
- volume = self.create_volume(volume_type=volume_type['id'],
- group_id=group['id'])
+ self.create_volume(volume_type=volume_type['id'], group_id=grp['id'])
- # Create group snapshot
- group_snapshot_name = data_utils.rand_name('group_snapshot')
- group_snapshot = (self.group_snapshots_client.create_group_snapshot(
- group_id=group['id'], name=group_snapshot_name)['group_snapshot'])
- self.addCleanup(self._delete_group_snapshot,
- group_snapshot['id'], group['id'])
- snapshots = self.snapshots_client.list_snapshots(
- detail=True)['snapshots']
- for snap in snapshots:
- if volume['id'] == snap['volume_id']:
+ # Create Group from Group
+ grp_name2 = data_utils.rand_name('Group_from_grp')
+ grp2 = self.groups_client.create_group_from_source(
+ source_group_id=grp['id'], name=grp_name2)['group']
+ self.addCleanup(self.delete_group, grp2['id'])
+ self.assertEqual(grp_name2, grp2['name'])
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == grp2['id']:
waiters.wait_for_volume_resource_status(
- self.snapshots_client, snap['id'], 'available')
+ self.volumes_client, vol['id'], 'available')
waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], 'available')
-
- # Reset group snapshot status
- self.addCleanup(waiters.wait_for_volume_resource_status,
- self.group_snapshots_client,
- group_snapshot['id'], 'available')
- self.addCleanup(
- self.admin_group_snapshots_client.reset_group_snapshot_status,
- group_snapshot['id'], 'available')
- for status in ['creating', 'available', 'error']:
- self.admin_group_snapshots_client.reset_group_snapshot_status(
- group_snapshot['id'], status)
- waiters.wait_for_volume_resource_status(
- self.group_snapshots_client, group_snapshot['id'], status)
+ self.groups_client, grp2['id'], 'available')
-class GroupsV320Test(BaseGroupsTest):
+class GroupsV320Test(base.BaseVolumeAdminTest):
_api_version = 3
min_microversion = '3.20'
max_microversion = 'latest'
@@ -380,7 +198,8 @@
group_type = self.create_group_type()
# Create group
- group = self._create_group(group_type, volume_type)
+ group = self.create_group(group_type=group_type['id'],
+ volume_types=[volume_type['id']])
# Reset group status
self.addCleanup(waiters.wait_for_volume_resource_status,
diff --git a/tempest/api/volume/admin/test_snapshot_manage.py b/tempest/api/volume/admin/test_snapshot_manage.py
index 9ff7160..3b21b28 100644
--- a/tempest/api/volume/admin/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/test_snapshot_manage.py
@@ -35,6 +35,9 @@
def skip_checks(cls):
super(SnapshotManageAdminTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.snapshot:
+ raise cls.skipException("Cinder volume snapshots are disabled")
+
if not CONF.volume_feature_enabled.manage_snapshot:
raise cls.skipException("Manage snapshot tests are disabled")
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 63ef85b..ea3bb5a 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -242,6 +242,27 @@
self.servers_client.delete_server, body['id'])
return body
+ def create_group(self, **kwargs):
+ if 'name' not in kwargs:
+ kwargs['name'] = data_utils.rand_name(
+ self.__class__.__name__ + '-Group')
+
+ group = self.groups_client.create_group(**kwargs)['group']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.delete_group, group['id'])
+ waiters.wait_for_volume_resource_status(
+ self.groups_client, group['id'], 'available')
+ return group
+
+ def delete_group(self, group_id, delete_volumes=True):
+ self.groups_client.delete_group(group_id, delete_volumes)
+ if delete_volumes:
+ vols = self.volumes_client.list_volumes(detail=True)['volumes']
+ for vol in vols:
+ if vol['group_id'] == group_id:
+ self.volumes_client.wait_for_resource_deletion(vol['id'])
+ self.groups_client.wait_for_resource_deletion(group_id)
+
class BaseVolumeAdminTest(BaseVolumeTest):
"""Base test case class for all Volume Admin API tests."""
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 86fe3f5..638ad9b 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -229,7 +229,7 @@
clients.servers_client, server['id'], wait_until)
# Multiple validatable servers are not supported for now. Their
- # creation will fail with the condition above (l.58).
+ # creation will fail with the condition above.
if CONF.validation.run_validation and validatable:
if CONF.validation.connect_method == 'floating':
_setup_validation_fip()
@@ -289,13 +289,21 @@
def create_websocket(url):
url = urlparse.urlparse(url)
- if url.scheme == 'https':
- client_socket = ssl.wrap_socket(socket.socket(socket.AF_INET,
- socket.SOCK_STREAM))
+ for res in socket.getaddrinfo(url.hostname, url.port,
+ socket.AF_UNSPEC, socket.SOCK_STREAM):
+ af, socktype, proto, _, sa = res
+ client_socket = socket.socket(af, socktype, proto)
+ if url.scheme == 'https':
+ client_socket = ssl.wrap_socket(client_socket)
+ client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ client_socket.connect(sa)
+ except socket.error:
+ client_socket.close()
+ continue
+ break
else:
- client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- client_socket.connect((url.hostname, url.port))
+ raise socket.error('WebSocket creation failed')
# Turn the Socket into a WebSocket to do the communication
return _WebSocket(client_socket, url)
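The rewritten create_websocket follows the standard getaddrinfo loop so that both IPv4 and IPv6 endpoints can be reached; the generic shape of that pattern, with an illustrative function name, is:

    import socket

    def connect_first_resolved(host, port):
        """Try each resolved address in turn and return the first socket
        that connects; raise if none of them succeed."""
        for res in socket.getaddrinfo(host, port,
                                      socket.AF_UNSPEC, socket.SOCK_STREAM):
            af, socktype, proto, _, sa = res
            sock = socket.socket(af, socktype, proto)
            try:
                sock.connect(sa)
            except socket.error:
                sock.close()
                continue
            return sock
        raise socket.error('could not connect to %s:%s' % (host, port))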
diff --git a/tempest/config.py b/tempest/config.py
index 5eee86d..fc95df8 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -229,17 +229,6 @@
"Empty list indicates all extensions are disabled. "
"To get the list of extensions run: "
"'openstack extension list --identity'"),
- # TODO(rodrigods): This is a feature flag for bug 1590578 which is fixed
- # in Newton and Ocata. This option can be removed after Mitaka is end of
- # life.
- cfg.BoolOpt('forbid_global_implied_dsr',
- default=False,
- help='Does the environment forbid global roles implying '
- 'domain specific ones?',
- deprecated_for_removal=True,
- deprecated_reason="This feature flag was introduced to "
- "support testing of old OpenStack versions, "
- "which are not supported anymore"),
cfg.BoolOpt('domain_specific_drivers',
default=False,
help='Are domain specific drivers enabled? '
@@ -484,6 +473,10 @@
default=False,
help='Does the test environment support in-place swapping of '
'volumes attached to a server instance?'),
+ cfg.BoolOpt('volume_backed_live_migration',
+ default=False,
+ help='Does the test environment support volume-backed live '
+ 'migration?'),
]
diff --git a/tempest/lib/services/volume/v3/group_types_client.py b/tempest/lib/services/volume/v3/group_types_client.py
index 97bac48..6181472 100644
--- a/tempest/lib/services/volume/v3/group_types_client.py
+++ b/tempest/lib/services/volume/v3/group_types_client.py
@@ -75,3 +75,16 @@
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def update_group_type(self, group_type_id, **kwargs):
+ """Updates a group type.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://developer.openstack.org/api-ref/block-storage/v3/#update-group-type
+ """
+ post_body = json.dumps({'group_type': kwargs})
+ resp, body = self.put('group_types/%s' % group_type_id, post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
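Illustrative use of the new call, assuming a configured client instance named group_types_client and an existing group type ID:

    # kwargs are serialized under the 'group_type' key and sent as a PUT to
    # group_types/<id>; the parsed response body is returned.
    updated = group_types_client.update_group_type(
        group_type_id,
        name='new-group-type-name',
        description='new group type description')['group_type']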
diff --git a/tempest/tests/lib/services/volume/v3/test_group_types_client.py b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
index 0f456a2..e86594e 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
from tempest.lib.services.volume.v3 import group_types_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
@@ -97,6 +99,18 @@
self.FAKE_LIST_GROUP_TYPES,
bytes_body)
+ def _test_update_group_types(self, bytes_body=False):
+ resp_body = copy.deepcopy(self.FAKE_INFO_GROUP_TYPE)
+ resp_body['group_type'].pop('created_at')
+
+ self.check_service_client_function(
+ self.client.update_group_type,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ resp_body,
+ bytes_body,
+ group_type_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
+ name='updated-group-type-name')
+
def test_create_group_type_with_str_body(self):
self._test_create_group_type()
@@ -122,3 +136,9 @@
def test_list_group_types_with_bytes_body(self):
self._test_list_group_types(bytes_body=True)
+
+ def test_update_group_types_with_str_body(self):
+ self._test_update_group_types()
+
+ def test_update_group_types_with_bytes_body(self):
+ self._test_update_group_types(bytes_body=True)
diff --git a/tools/tox_install.sh b/tools/tox_install.sh
deleted file mode 100755
index 43468e4..0000000
--- a/tools/tox_install.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-
-# Client constraint file contains this client version pin that is in conflict
-# with installing the client from source. We should remove the version pin in
-# the constraints file before applying it for from-source installation.
-
-CONSTRAINTS_FILE=$1
-shift 1
-
-set -e
-
-# NOTE(tonyb): Place this in the tox enviroment's log dir so it will get
-# published to logs.openstack.org for easy debugging.
-localfile="$VIRTUAL_ENV/log/upper-constraints.txt"
-
-if [[ $CONSTRAINTS_FILE != http* ]]; then
- CONSTRAINTS_FILE=file://$CONSTRAINTS_FILE
-fi
-# NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep
-curl $CONSTRAINTS_FILE --insecure --progress-bar --output $localfile
-
-pip install -c$localfile openstack-requirements
-
-# This is the main purpose of the script: Allow local installation of
-# the current repo. It is listed in constraints file and thus any
-# install will be constrained and we need to unconstrain it.
-edit-constraints $localfile -- $CLIENT_NAME
-
-pip install -c$localfile -U $*
-exit $?
diff --git a/tox.ini b/tox.ini
index db5976f..e7ea1e2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,9 +8,8 @@
setenv =
VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
- BRANCH_NAME=master
- CLIENT_NAME=tempest
deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
[testenv]
@@ -18,14 +17,12 @@
VIRTUAL_ENV={envdir}
OS_LOG_CAPTURE=1
PYTHONWARNINGS=default::DeprecationWarning
- BRANCH_NAME=master
- CLIENT_NAME=tempest
passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
usedevelop = True
-install_command =
- {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
+install_command = pip install {opts} {packages}
whitelist_externals = *
deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =