Merge "Publish BLACKLIST on Tempest plugin registry page"
diff --git a/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml b/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml
new file mode 100644
index 0000000..384f916
--- /dev/null
+++ b/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ A new parameter, ``compute/compute_volume_common_az``, is introduced to
+ specify the availability zone in which Tempest creates instances and
+ volumes for scenario tests. This allows scenario tests to run against
+ deployments that have multiple availability zones and set
+ cinder/cross_az_attach to False in nova.conf.
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 2cd8906..366d6a0 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -44,11 +44,14 @@
cls.addClassResourceCleanup(
cls.services_client.delete_service, service['id'])
- region = data_utils.rand_name('region')
+ region_name = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = cls.client.create_endpoint(
service_id=cls.service_ids[i], interface=interfaces[i],
- url=url, region=region, enabled=True)['endpoint']
+ url=url, region=region_name, enabled=True)['endpoint']
+ region = cls.regions_client.show_region(region_name)['region']
+ cls.addClassResourceCleanup(
+ cls.regions_client.delete_region, region['id'])
cls.addClassResourceCleanup(
cls.client.delete_endpoint, endpoint['id'])
cls.setup_endpoint_ids.append(endpoint['id'])
@@ -108,17 +111,19 @@
@decorators.idempotent_id('0e2446d2-c1fd-461b-a729-b9e73e3e3b37')
def test_create_list_show_delete_endpoint(self):
- region = data_utils.rand_name('region')
+ region_name = data_utils.rand_name('region')
url = data_utils.rand_url()
interface = 'public'
endpoint = self.client.create_endpoint(service_id=self.service_ids[0],
interface=interface,
- url=url, region=region,
+ url=url, region=region_name,
enabled=True)['endpoint']
+ region = self.regions_client.show_region(region_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.client.delete_endpoint, endpoint['id'])
# Asserting Create Endpoint response body
- self.assertEqual(region, endpoint['region'])
+ self.assertEqual(region_name, endpoint['region'])
self.assertEqual(url, endpoint['url'])
# Checking if created endpoint is present in the list of endpoints
@@ -133,7 +138,7 @@
self.assertEqual(self.service_ids[0], fetched_endpoint['service_id'])
self.assertEqual(interface, fetched_endpoint['interface'])
self.assertEqual(url, fetched_endpoint['url'])
- self.assertEqual(region, fetched_endpoint['region'])
+ self.assertEqual(region_name, fetched_endpoint['region'])
self.assertEqual(True, fetched_endpoint['enabled'])
# Deleting the endpoint created in this method
@@ -161,28 +166,33 @@
self.addCleanup(self.services_client.delete_service, service2['id'])
# Creating an endpoint so as to check update endpoint with new values
- region1 = data_utils.rand_name('region')
+ region1_name = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
endpoint_for_update = (
self.client.create_endpoint(service_id=self.service_ids[0],
interface=interface1,
- url=url1, region=region1,
+ url=url1, region=region1_name,
enabled=True)['endpoint'])
- self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
+ region1 = self.regions_client.show_region(region1_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region1['id'])
# Updating endpoint with new values
- region2 = data_utils.rand_name('region')
+ region2_name = data_utils.rand_name('region')
url2 = data_utils.rand_url()
interface2 = 'internal'
endpoint = self.client.update_endpoint(endpoint_for_update['id'],
service_id=service2['id'],
interface=interface2,
- url=url2, region=region2,
+ url=url2, region=region2_name,
enabled=False)['endpoint']
+ region2 = self.regions_client.show_region(region2_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region2['id'])
+ self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
+
# Asserting if the attributes of endpoint are updated
self.assertEqual(service2['id'], endpoint['service_id'])
self.assertEqual(interface2, endpoint['interface'])
self.assertEqual(url2, endpoint['url'])
- self.assertEqual(region2, endpoint['region'])
+ self.assertEqual(region2_name, endpoint['region'])
self.assertEqual(False, endpoint['enabled'])
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 4c3eb1c..164b577 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -70,14 +70,16 @@
def _assert_update_raises_bad_request(self, enabled):
# Create an endpoint
- region1 = data_utils.rand_name('region')
+ region1_name = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
endpoint_for_update = (
self.client.create_endpoint(service_id=self.service_id,
interface=interface1,
- url=url1, region=region1,
+ url=url1, region=region1_name,
enabled=True)['endpoint'])
+ region1 = self.regions_client.show_region(region1_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region1['id'])
self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
self.assertRaises(lib_exc.BadRequest, self.client.update_endpoint,
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
old mode 100755
new mode 100644
index e1b8cf5..11f3bf9
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -121,7 +121,9 @@
'/'.join((server_status, str(task_state))),
time.time() - start_time)
if server_status == 'ERROR' and not ignore_error:
- raise lib_exc.DeleteErrorException(resource_id=server_id)
+ raise lib_exc.DeleteErrorException(
+ "Server %s failed to delete and is in ERROR status" %
+ server_id)
if int(time.time()) - start_time >= client.build_timeout:
raise lib_exc.TimeoutException
diff --git a/tempest/config.py b/tempest/config.py
index 9e4718b..82cbe09 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -352,6 +352,19 @@
"If both values are not specified, Tempest avoids tests "
"which require a microversion. Valid values are string "
"with format 'X.Y' or string 'latest'"),
+ cfg.StrOpt('compute_volume_common_az',
+ default=None,
+ help='AZ to be used for Cinder and Nova. Set this parameter '
'when the cloud has cinder.cross_az_attach set to False '
'in nova.conf, which means volumes attached to an '
+ 'instance must be in the same availability zone in Cinder '
+ 'as the instance availability zone in Nova. Set the '
+ 'common availability zone in this config which will be '
+ 'used to boot an instance as well as creating a volume. '
+ 'NOTE: If that AZ is not in Cinder (or '
+ 'allow_availability_zone_fallback=False in cinder.conf), '
+ 'the volume create request will fail and the instance '
+ 'will fail the build request.'),
]
placement_group = cfg.OptGroup(name='placement',
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index 2dbdd11..a93c76e 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -212,7 +212,9 @@
except lib_exc.NotFound:
return True
if volume["volume"]["status"] == "error_deleting":
- raise lib_exc.DeleteErrorException(resource_id=id)
+ raise lib_exc.DeleteErrorException(
+ "Volume %s failed to delete and is in error_deleting status" %
+ volume['volume']['id'])
return False
@property
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 87d7e76..6ce5b78 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -274,6 +274,10 @@
tenant_network = self.get_tenant_network()
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
body, _ = compute.create_test_server(
clients,
tenant_network=tenant_network,
@@ -307,6 +311,11 @@
'imageRef': imageRef,
'volume_type': volume_type,
'size': size}
+
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
volume = self.volumes_client.create_volume(**kwargs)['volume']
self.addCleanup(self.volumes_client.wait_for_resource_deletion,