Merge "Fix cleanup of resources for object_storage tests"
diff --git a/releasenotes/notes/2023.2-intermediate-release-8725d48b96854dce.yaml b/releasenotes/notes/2023.2-intermediate-release-8725d48b96854dce.yaml
new file mode 100644
index 0000000..7d3d3c4
--- /dev/null
+++ b/releasenotes/notes/2023.2-intermediate-release-8725d48b96854dce.yaml
@@ -0,0 +1,5 @@
+---
+prelude: >
+ This is an intermediate release during the 2023.2 development cycle to
+ make the scenario tests' server SSHABLE functionality available to
+ plugins and other consumers.
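
For plugin authors, the newly exported wait state is used like any other `wait_until` value. A minimal sketch, assuming a hypothetical plugin test class derived from the scenario manager:

```python
from tempest.scenario import manager


class MyPluginScenarioTest(manager.ScenarioTest):
    # Hypothetical plugin test class; only the wait_until value below is
    # what this release makes available.

    def test_server_reachable_before_workload(self):
        # wait_until='SSHABLE' provisions validation resources and blocks
        # until the guest answers over SSH, not merely until Nova reports
        # the server as ACTIVE.
        server = self.create_server(wait_until='SSHABLE')
```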
diff --git a/releasenotes/notes/add-volume-detach-libs-2cbb3ca924aed0ac.yaml b/releasenotes/notes/add-volume-detach-libs-2cbb3ca924aed0ac.yaml
new file mode 100644
index 0000000..30127b3
--- /dev/null
+++ b/releasenotes/notes/add-volume-detach-libs-2cbb3ca924aed0ac.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add delete_attachment to the v3 AttachmentsClient and terminate_connection
+ to the v3 VolumesClient.
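
A hedged sketch of the two new library calls; `att_id` and `volume_id` are placeholder identifiers, and the expected status codes follow the schema changes further down in this diff:

```python
# Sketch only: att_id and volume_id are placeholders taken from an
# existing attachment.
attachments_client = self.os_primary.attachments_client_latest
volumes_client = self.os_primary.volumes_client_latest

# DELETE /attachments/{attachment_id}; the client expects HTTP 200.
attachments_client.delete_attachment(att_id)

# POST /volumes/{volume_id}/action with an os-terminate_connection body
# carrying the host connector; the response schema expects HTTP 202.
volumes_client.terminate_connection(volume_id, connector={})
```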
diff --git a/releasenotes/notes/end-of-support-of-xena-2e747cff7f8bc48a.yaml b/releasenotes/notes/end-of-support-of-xena-2e747cff7f8bc48a.yaml
new file mode 100644
index 0000000..39f6866
--- /dev/null
+++ b/releasenotes/notes/end-of-support-of-xena-2e747cff7f8bc48a.yaml
@@ -0,0 +1,12 @@
+---
+prelude: >
+ This is an intermediate release during the 2023.2 development cycle to
+ mark the end of support for the EM (Extended Maintenance) Xena release
+ in Tempest. After this release, Tempest will support the following
+ OpenStack releases:
+
+ * 2023.1
+ * Zed
+ * Yoga
+
+ Current development of Tempest is for OpenStack 2023.2 development
+ cycle.
diff --git a/releasenotes/notes/update-v3-entrypoint-29d56c902439cc03.yaml b/releasenotes/notes/update-v3-entrypoint-29d56c902439cc03.yaml
new file mode 100644
index 0000000..363e59f
--- /dev/null
+++ b/releasenotes/notes/update-v3-entrypoint-29d56c902439cc03.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Update the default value of the config option
+ ``CONF.identity.v3_endpoint_type`` from adminURL to public. adminURL was
+ deprecated in the Queens release, but the default was never updated. The
+ default endpoint type used by tempest should be the public one.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 882413f..4c1edd5 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ v34.2.0
v34.0.0
v33.0.0
v32.0.0
diff --git a/releasenotes/source/v34.2.0.rst b/releasenotes/source/v34.2.0.rst
new file mode 100644
index 0000000..386cf71
--- /dev/null
+++ b/releasenotes/source/v34.2.0.rst
@@ -0,0 +1,6 @@
+=====================
+v34.2.0 Release Notes
+=====================
+
+.. release-notes:: 34.2.0 Release Notes
+ :version: 34.2.0
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index f7c0dd9..19026d3 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -258,6 +258,7 @@
port = self.ports_client.show_port(port_id)['port']
return port['status'] == 'ACTIVE'
+ @decorators.unstable_test(bug='2027605')
@decorators.attr(type='multinode')
@decorators.idempotent_id('0022c12e-a482-42b0-be2d-396b5f0cffe3')
@utils.requires_ext(service='network', extension='trunk')
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 260d4e0..b1bfac7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -462,9 +462,11 @@
self, server_id, new_flavor_id, wait_until='ACTIVE', **kwargs
):
"""resize and confirm_resize an server, waits for it to be ACTIVE."""
- self.servers_client.resize_server(server_id, new_flavor_id, **kwargs)
- waiters.wait_for_server_status(self.servers_client, server_id,
- 'VERIFY_RESIZE')
+ body = self.servers_client.resize_server(
+ server_id, new_flavor_id, **kwargs)
+ waiters.wait_for_server_status(
+ self.servers_client, server_id, 'VERIFY_RESIZE',
+ request_id=body.response['x-openstack-request-id'])
self.servers_client.confirm_resize_server(server_id)
waiters.wait_for_server_status(
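
The pattern generalizes to any server action: take the request id from the action's ResponseBody headers and hand it to the waiter, so timeout and fault messages name the triggering call. A sketch with a hypothetical wrapper:

```python
def wait_with_request_id(client, server_id, status, body):
    # Hypothetical convenience helper (not part of this change): forward
    # the request id of the preceding server action to the waiter so that
    # timeout and fault messages reference the exact API call.
    waiters.wait_for_server_status(
        client, server_id, status,
        request_id=body.response['x-openstack-request-id'])
```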
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index e1e7fda..af58433 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -701,12 +701,6 @@
compute.shelve_server(self.client, self.server_id,
force_shelve_offload=True)
- def _unshelve_server():
- server_info = self.client.show_server(self.server_id)['server']
- if 'SHELVED' in server_info['status']:
- self.client.unshelve_server(self.server_id)
- self.addCleanup(_unshelve_server)
-
server = self.client.show_server(self.server_id)['server']
image_name = server['name'] + '-shelved'
params = {'name': image_name}
@@ -718,8 +712,13 @@
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
- self.client.unshelve_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+ body = self.client.unshelve_server(self.server_id)
+ waiters.wait_for_server_status(
+ self.client,
+ self.server_id,
+ "ACTIVE",
+ request_id=body.response["x-openstack-request-id"],
+ )
glance_client.wait_for_resource_deletion(images[0]['id'])
@decorators.idempotent_id('8cf9f450-a871-42cf-9bef-77eba189c0b0')
@@ -817,3 +816,118 @@
backup_type='daily',
rotation=2,
name=backup1)
+
+
+class ServerActionsV293TestJSON(base.BaseV2ComputeTest):
+
+ min_microversion = '2.93'
+ volume_min_microversion = '3.68'
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(ServerActionsV293TestJSON, cls).setup_credentials()
+
+ @classmethod
+ def resource_setup(cls):
+ super(ServerActionsV293TestJSON, cls).resource_setup()
+ cls.server_id = cls.recreate_server(None, validatable=True)
+
+ @utils.services('volume')
+ @decorators.idempotent_id('6652dab9-ea24-4c93-ab5a-93d79c3041cf')
+ def test_rebuild_volume_backed_server(self):
+ """Test rebuilding a volume backed server"""
+ # We have to create a new server that is volume-backed since the one
+ # from setUp is not volume-backed.
+ kwargs = {'volume_backed': True,
+ 'wait_until': 'ACTIVE'}
+ validation_resources = {}
+ if CONF.validation.run_validation:
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ kwargs.update({'validatable': True,
+ 'validation_resources': validation_resources})
+ server = self.create_test_server(**kwargs)
+ server = self.servers_client.show_server(server['id'])['server']
+ self.addCleanup(self.delete_server, server['id'])
+ volume_id = server['os-extended-volumes:volumes_attached'][0]['id']
+ volume_before_rebuild = self.volumes_client.show_volume(volume_id)
+ image_before_rebuild = (
+ volume_before_rebuild['volume']
+ ['volume_image_metadata']['image_id'])
+ # Verify that image inside volume is our initial image before rebuild
+ self.assertEqual(self.image_ref, image_before_rebuild)
+
+ # Authentication is attempted in the following order of priority:
+ # 1.The key passed in, if one was passed in.
+ # 2.Any key we can find through an SSH agent (if allowed).
+ # 3.Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
+ # ~/.ssh/ (if allowed).
+ # 4.Plain username/password auth, if a password was given.
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.ssh_user,
+ password=None,
+ pkey=validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.servers_client)
+ output = linux_client.exec_command('touch test_file')
+ # No output means success
+ self.assertEqual('', output.strip())
+
+ # The server should be rebuilt using the provided image and data
+ meta = {'rebuild': 'server'}
+ new_name = data_utils.rand_name(self.__class__.__name__ + '-server')
+ password = 'rebuildPassw0rd'
+ rebuilt_server = self.servers_client.rebuild_server(
+ server['id'],
+ self.image_ref_alt,
+ name=new_name,
+ metadata=meta,
+ adminPass=password)['server']
+
+ # Verify the properties in the initial response are correct
+ self.assertEqual(server['id'], rebuilt_server['id'])
+ rebuilt_image_id = rebuilt_server['image']
+ # Since it is a volume-backed server, the image id will remain empty
+ self.assertEqual('', rebuilt_image_id)
+ self.assert_flavor_equal(self.flavor_ref, rebuilt_server['flavor'])
+
+ # Verify the server properties after the rebuild completes
+ waiters.wait_for_server_status(self.servers_client,
+ rebuilt_server['id'], 'ACTIVE')
+ server = self.servers_client.show_server(
+ rebuilt_server['id'])['server']
+ volume_id = server['os-extended-volumes:volumes_attached'][0]['id']
+ volume_after_rebuild = self.volumes_client.show_volume(volume_id)
+ image_after_rebuild = (
+ volume_after_rebuild['volume']
+ ['volume_image_metadata']['image_id'])
+
+ self.assertEqual(new_name, server['name'])
+ # Verify that volume ID remains same before and after rebuild
+ self.assertEqual(volume_before_rebuild['volume']['id'],
+ volume_after_rebuild['volume']['id'])
+ # Verify that image inside volume is our final image after rebuild
+ self.assertEqual(self.image_ref_alt, image_after_rebuild)
+
+ # Authentication is attempted in the following order of priority:
+ # 1.The key passed in, if one was passed in.
+ # 2.Any key we can find through an SSH agent (if allowed).
+ # 3.Any "id_rsa", "id_dsa" or "id_ecdsa" key discoverable in
+ # ~/.ssh/ (if allowed).
+ # 4.Plain username/password auth, if a password was given.
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(rebuilt_server, validation_resources),
+ self.ssh_alt_user,
+ password,
+ validation_resources['keypair']['private_key'],
+ server=rebuilt_server,
+ servers_client=self.servers_client)
+ linux_client.validate_authentication()
+ e = self.assertRaises(lib_exc.SSHExecCommandFailed,
+ linux_client.exec_command,
+ 'cat test_file')
+ # If we rebuilt the boot volume, then we should not find
+ # the file we touched.
+ self.assertIn('No such file or directory', str(e))
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 23e7fd8..11a1e6c 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -13,6 +13,7 @@
# under the License.
import io
+import time
from tempest.common import image as common_image
from tempest import config
@@ -22,6 +23,7 @@
import tempest.test
CONF = config.CONF
+BAD_REQUEST_RETRIES = 3
class BaseImageTest(tempest.test.BaseTestCase):
@@ -159,6 +161,82 @@
pass
return stores
+ def _update_image_with_retries(self, image, patch):
+ # NOTE(danms): If glance was unable to fetch the remote image via
+ # HTTP, it will return BadRequest. Because this can be transient in
+ # CI, we try this a few times before we agree that it has failed
+ # for a reason worthy of failing the test.
+ for i in range(BAD_REQUEST_RETRIES):
+ try:
+ self.client.update_image(image, patch)
+ break
+ except lib_exc.BadRequest:
+ if i + 1 == BAD_REQUEST_RETRIES:
+ raise
+ else:
+ time.sleep(1)
+
+ def check_set_location(self):
+ image = self.client.create_image(container_format='bare',
+ disk_format='raw')
+
+ # Locations should be empty when there is no data
+ self.assertEqual('queued', image['status'])
+ self.assertEqual([], image['locations'])
+
+ # Add a new location
+ new_loc = {'metadata': {'foo': 'bar'},
+ 'url': CONF.image.http_image}
+ self._update_image_with_retries(image['id'], [
+ dict(add='/locations/-', value=new_loc)])
+
+ # The image should now be active, with one location that looks
+ # like we expect
+ image = self.client.show_image(image['id'])
+ self.assertEqual(1, len(image['locations']),
+ 'Image should have one location but has %i' % (
+ len(image['locations'])))
+ self.assertEqual(new_loc['url'], image['locations'][0]['url'])
+ self.assertEqual('bar', image['locations'][0]['metadata'].get('foo'))
+ if 'direct_url' in image:
+ self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+ # If we added the location directly, the image goes straight
+ # to active and no hashing is done
+ self.assertEqual('active', image['status'])
+ self.assertIsNone(image['os_hash_algo'])
+ self.assertIsNone(image['os_hash_value'])
+
+ return image
+
+ def check_set_multiple_locations(self):
+ image = self.check_set_location()
+
+ new_loc = {'metadata': {'speed': '88mph'},
+ 'url': '%s#new' % CONF.image.http_image}
+ self._update_image_with_retries(image['id'],
+ [dict(add='/locations/-',
+ value=new_loc)])
+
+ # The image should now have two locations and the last one
+ # (locations are ordered) should have the new URL.
+ image = self.client.show_image(image['id'])
+ self.assertEqual(2, len(image['locations']),
+ 'Image should have two locations but has %i' % (
+ len(image['locations'])))
+ self.assertEqual(new_loc['url'], image['locations'][1]['url'])
+
+ # The image should still be active and still have no hashes
+ self.assertEqual('active', image['status'])
+ self.assertIsNone(image['os_hash_algo'])
+ self.assertIsNone(image['os_hash_value'])
+
+ # The direct_url should still match the first location
+ if 'direct_url' in image:
+ self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+ return image
+
class BaseV2MemberImageTest(BaseV2ImageTest):
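
Because `check_set_location()` and `check_set_multiple_locations()` are now public on the base class, admin tests (and plugins) can reuse them. A minimal sketch with a hypothetical subclass:

```python
class ImageLocationSmokeTest(BaseV2ImageTest):
    # Hypothetical subclass illustrating reuse of the now-public helpers.

    def test_two_locations(self):
        # check_set_multiple_locations() first runs check_set_location()
        # (one location, image active, no hashes), then appends a second
        # location and re-verifies.
        image = self.check_set_multiple_locations()
        self.assertEqual(2, len(image['locations']))
```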
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index 733c778..ce50c5d 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -20,6 +20,7 @@
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -59,6 +60,24 @@
self.assertNotEqual(created_image_info['owner'],
updated_image_info['owner'])
+ @decorators.idempotent_id('f6ab4aa0-035e-4664-9f2d-c57c6df50605')
+ def test_list_public_image(self):
+ """Test create image as admin and list public image as none admin"""
+ name = data_utils.rand_name(self.__class__.__name__ + '-Image')
+ image = self.admin_client.create_image(
+ name=name,
+ container_format='bare',
+ visibility='public',
+ disk_format='raw')
+ waiters.wait_for_image_status(self.admin_client, image['id'], 'queued')
+ created_image = self.admin_client.show_image(image['id'])
+ self.assertEqual(image['id'], created_image['id'])
+ self.addCleanup(self.admin_client.delete_image, image['id'])
+
+ images_list = self.client.list_images()['images']
+ fetched_images_id = [img['id'] for img in images_list]
+ self.assertIn(image['id'], fetched_images_id)
+
class ImportCopyImagesTest(base.BaseV2ImageAdminTest):
"""Test the import copy-image operations"""
@@ -120,3 +139,40 @@
self.assertEqual(0, len(failed_stores),
"Failed to copy the following stores: %s" %
str(failed_stores))
+
+
+class ImageLocationsAdminTest(base.BaseV2ImageAdminTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(ImageLocationsAdminTest, cls).skip_checks()
+ if not CONF.image_feature_enabled.manage_locations:
+ skip_msg = (
+ "%s skipped as show_multiple_locations is not available" % (
+ cls.__name__))
+ raise cls.skipException(skip_msg)
+
+ @decorators.idempotent_id('8a648de4-b745-4c28-a7b5-20de1c3da4d2')
+ def test_delete_locations(self):
+ image = self.check_set_multiple_locations()
+ expected_remaining_loc = image['locations'][1]
+
+ self.admin_client.update_image(image['id'], [
+ dict(remove='/locations/0')])
+
+ # The image should now have only the one location we did not delete
+ image = self.client.show_image(image['id'])
+ self.assertEqual(1, len(image['locations']),
+ 'Image should have one location but has %i' % (
+ len(image['locations'])))
+ self.assertEqual(expected_remaining_loc['url'],
+ image['locations'][0]['url'])
+
+ # The direct_url should now be the last remaining location
+ if 'direct_url' in image:
+ self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+ # Removing the last location should be disallowed
+ self.assertRaises(lib_exc.Forbidden,
+ self.admin_client.update_image, image['id'], [
+ dict(remove='/locations/0')])
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index b723977..977ad82 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -16,19 +16,18 @@
import io
import random
-import time
from oslo_log import log as logging
from tempest.api.image import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
LOG = logging.getLogger(__name__)
-BAD_REQUEST_RETRIES = 3
class ImportImagesTest(base.BaseV2ImageTest):
@@ -735,6 +734,30 @@
body = self.schemas_client.show_schema(schema)
self.assertEqual("images", body['name'])
+ @decorators.idempotent_id('d43f3efc-da4c-4af9-b636-868f0c6acedb')
+ def test_list_hidden_image(self):
+ image = self.client.create_image(os_hidden=True)
+ image = image['image'] if 'image' in image else image
+ self.addCleanup(self.client.wait_for_resource_deletion, image['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_image, image['id'])
+ images_list = self.client.list_images()['images']
+ fetched_images_id = [img['id'] for img in images_list]
+ self.assertNotIn(image['id'], fetched_images_id)
+
+ @decorators.idempotent_id('fdb96b81-257b-42ac-978b-ddeefa3760e4')
+ def test_list_update_hidden_image(self):
+ image = self.create_image()
+ images_list = self.client.list_images()['images']
+ fetched_images_id = [img['id'] for img in images_list]
+ self.assertIn(image['id'], fetched_images_id)
+
+ self.client.update_image(image['id'],
+ [dict(replace='/os_hidden', value=True)])
+ images_list = self.client.list_images()['images']
+ fetched_images_id = [img['id'] for img in images_list]
+ self.assertNotIn(image['id'], fetched_images_id)
+
class ListSharedImagesTest(base.BaseV2ImageTest):
"""Here we test the listing of a shared image information"""
@@ -808,89 +831,13 @@
return image
- def _check_set_location(self):
- image = self.client.create_image(container_format='bare',
- disk_format='raw')
-
- # Locations should be empty when there is no data
- self.assertEqual('queued', image['status'])
- self.assertEqual([], image['locations'])
-
- # Add a new location
- new_loc = {'metadata': {'foo': 'bar'},
- 'url': CONF.image.http_image}
- self._update_image_with_retries(image['id'], [
- dict(add='/locations/-', value=new_loc)])
-
- # The image should now be active, with one location that looks
- # like we expect
- image = self.client.show_image(image['id'])
- self.assertEqual(1, len(image['locations']),
- 'Image should have one location but has %i' % (
- len(image['locations'])))
- self.assertEqual(new_loc['url'], image['locations'][0]['url'])
- self.assertEqual('bar', image['locations'][0]['metadata'].get('foo'))
- if 'direct_url' in image:
- self.assertEqual(image['direct_url'], image['locations'][0]['url'])
-
- # If we added the location directly, the image goes straight
- # to active and no hashing is done
- self.assertEqual('active', image['status'])
- self.assertIsNone(None, image['os_hash_algo'])
- self.assertIsNone(None, image['os_hash_value'])
-
- return image
-
@decorators.idempotent_id('37599b8a-d5c0-4590-aee5-73878502be15')
def test_set_location(self):
- self._check_set_location()
-
- def _update_image_with_retries(self, image, patch):
- # NOTE(danms): If glance was unable to fetch the remote image via
- # HTTP, it will return BadRequest. Because this can be transient in
- # CI, we try this a few times before we agree that it has failed
- # for a reason worthy of failing the test.
- for i in range(BAD_REQUEST_RETRIES):
- try:
- self.client.update_image(image, patch)
- break
- except lib_exc.BadRequest:
- if i + 1 == BAD_REQUEST_RETRIES:
- raise
- else:
- time.sleep(1)
-
- def _check_set_multiple_locations(self):
- image = self._check_set_location()
-
- new_loc = {'metadata': {'speed': '88mph'},
- 'url': '%s#new' % CONF.image.http_image}
- self._update_image_with_retries(image['id'],
- [dict(add='/locations/-',
- value=new_loc)])
-
- # The image should now have two locations and the last one
- # (locations are ordered) should have the new URL.
- image = self.client.show_image(image['id'])
- self.assertEqual(2, len(image['locations']),
- 'Image should have two locations but has %i' % (
- len(image['locations'])))
- self.assertEqual(new_loc['url'], image['locations'][1]['url'])
-
- # The image should still be active and still have no hashes
- self.assertEqual('active', image['status'])
- self.assertIsNone(None, image['os_hash_algo'])
- self.assertIsNone(None, image['os_hash_value'])
-
- # The direct_url should still match the first location
- if 'direct_url' in image:
- self.assertEqual(image['direct_url'], image['locations'][0]['url'])
-
- return image
+ self.check_set_location()
@decorators.idempotent_id('bf6e0009-c039-4884-b498-db074caadb10')
def test_replace_location(self):
- image = self._check_set_multiple_locations()
+ image = self.check_set_multiple_locations()
original_locs = image['locations']
# Replacing with the exact thing should work
@@ -927,31 +874,6 @@
len(image['locations'])))
self.assertEqual(original_locs, image['locations'])
- @decorators.idempotent_id('8a648de4-b745-4c28-a7b5-20de1c3da4d2')
- def test_delete_locations(self):
- image = self._check_set_multiple_locations()
- expected_remaining_loc = image['locations'][1]
-
- self.client.update_image(image['id'], [
- dict(remove='/locations/0')])
-
- # The image should now have only the one location we did not delete
- image = self.client.show_image(image['id'])
- self.assertEqual(1, len(image['locations']),
- 'Image should have one location but has %i' % (
- len(image['locations'])))
- self.assertEqual(expected_remaining_loc['url'],
- image['locations'][0]['url'])
-
- # The direct_url should now be the last remaining location
- if 'direct_url' in image:
- self.assertEqual(image['direct_url'], image['locations'][0]['url'])
-
- # Removing the last location should be disallowed
- self.assertRaises(lib_exc.Forbidden,
- self.client.update_image, image['id'], [
- dict(remove='/locations/0')])
-
@decorators.idempotent_id('a9a20396-8399-4b36-909d-564949be098f')
def test_set_location_bad_scheme(self):
image = self.client.create_image(container_format='bare',
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index ecddfba..b6e9f32 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -83,7 +83,7 @@
server_id = self.create_server()['id']
volume_id = self.create_volume()['id']
- # Attach volume
+ # Request Cinder to map & export the volume (it is not attached to an instance)
self.volumes_client.attach_volume(
volume_id,
instance_uuid=server_id,
@@ -101,7 +101,9 @@
waiters.wait_for_volume_resource_status(self.volumes_client,
volume_id, 'error')
- # Force detach volume
+ # The force detach volume call works because the volume is not really
+ # connected to the instance (so it is safe); otherwise it would be
+ # rejected for security reasons (bug #2004555).
self.admin_volume_client.force_detach_volume(
volume_id, connector=None,
attachment_id=attachment['attachment_id'])
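
A sketch of the flow this test exercises; IDs and the mountpoint value are illustrative placeholders:

```python
# os-attach only records the attachment in Cinder's database -- no host
# connector is involved, so nothing is actually plugged on a compute node.
self.volumes_client.attach_volume(
    volume_id, instance_uuid=server_id, mountpoint='/dev/vdb')
# With connector=None, force detach merely cleans up that DB-side
# attachment, which is why it is permitted here while detaching a
# genuinely connected volume is rejected (bug #2004555).
self.admin_volume_client.force_detach_volume(
    volume_id, connector=None, attachment_id=attachment_id)
```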
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 9ba9949..a31390a 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -51,6 +51,8 @@
def setup_credentials(cls):
cls.set_network_resources(
network=cls.create_default_network,
+ router=cls.create_default_network,
+ dhcp=cls.create_default_network,
subnet=cls.create_default_network)
super(BaseVolumeTest, cls).setup_credentials()
@@ -223,6 +225,14 @@
'name',
data_utils.rand_name(self.__class__.__name__ + '-instance'))
+ if wait_until == 'SSHABLE' and not kwargs.get('validation_resources'):
+ # If we were asked for SSHABLE but were not provided with the
+ # required validation_resources and validatable flag, ensure we
+ # pass them to create_test_server() so that it will actually wait.
+ kwargs['validation_resources'] = (
+ self.get_test_validation_resources(self.os_primary))
+ kwargs['validatable'] = True
+
tenant_network = self.get_tenant_network()
body, _ = compute.create_test_server(
self.os_primary,
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 138d120..85e4bb2 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -29,6 +29,8 @@
class VolumesBackupsTest(base.BaseVolumeTest):
"""Test volumes backup"""
+ create_default_network = True
+
@classmethod
def skip_checks(cls):
super(VolumesBackupsTest, cls).skip_checks()
@@ -116,7 +118,11 @@
# Create a server
volume = self.create_volume()
self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
- server = self.create_server()
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ server = self.create_server(wait_until='SSHABLE',
+ validation_resources=validation_resources,
+ validatable=True)
# Attach volume to instance
self.attach_volume(server['id'], volume['id'])
# Create backup using force flag
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index fcbc982..c5c94e1 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -46,6 +46,9 @@
@decorators.idempotent_id('86be1cba-2640-11e5-9c82-635fb964c912')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
"Cinder volume snapshots are disabled")
+ @testtools.skipUnless(
+ CONF.volume_feature_enabled.extend_volume_with_snapshot,
+ "Extending volume with snapshot is disabled.")
def test_volume_extend_when_volume_has_snapshot(self):
"""Test extending a volume which has a snapshot"""
volume = self.create_volume()
@@ -114,7 +117,7 @@
if the action on the server fails.
"""
# Create a test server. Will be automatically cleaned up on teardown.
- server = self.create_server()
+ server = self.create_server(wait_until='SSHABLE')
# Attach the volume to the server and wait for the volume status to be
# "in-use".
self.attach_volume(server['id'], volume['id'])
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index be8766d..53d44f1 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -304,6 +304,10 @@
# this additional wait state for later use.
wait_until_extra = None
if wait_until in ['PINGABLE', 'SSHABLE']:
+ if not validatable and validation_resources is None:
+ raise RuntimeError(
+ 'SSHABLE/PINGABLE requires validatable=True '
+ 'and validation_resources to be passed')
wait_until_extra = wait_until
wait_until = 'ACTIVE'
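
After this guard, a caller asking for PINGABLE/SSHABLE must supply both pieces. A sketch of a compliant call from a test class, mirroring what the volume base class in this diff does:

```python
# Sketch: obtain the validation resources from the test base class first,
# then ask for the SSHABLE wait state explicitly.
validation_resources = self.get_test_validation_resources(self.os_primary)
body, _ = compute.create_test_server(
    self.os_primary,
    validatable=True,
    validation_resources=validation_resources,
    wait_until='SSHABLE')
```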
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index c5da412..d88bc05 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -77,7 +77,8 @@
if 'fault' in body:
details += 'Fault: %s.' % body['fault']
if request_id:
- details += ' Server boot request ID: %s.' % request_id
+ details += ' Request ID of the server operation performed before'
+ details += ' checking the server status: %s.' % request_id
raise exceptions.BuildErrorException(details, server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
@@ -92,7 +93,8 @@
'expected_task_state': expected_task_state,
'timeout': timeout})
if request_id:
- message += ' Server boot request ID: %s.' % request_id
+ message += ' Request ID of the server operation performed before'
+ message += ' checking the server status: %s.' % request_id
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
caller = test_utils.find_test_caller()
diff --git a/tempest/config.py b/tempest/config.py
index 551578e..a174fdd 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -153,13 +153,11 @@
help="The public endpoint type to use for OpenStack Identity "
"(Keystone) API v2"),
cfg.StrOpt('v3_endpoint_type',
- default='adminURL',
+ default='public',
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for OpenStack Identity "
- "(Keystone) API v3. The default value adminURL is "
- "deprecated and will be modified to publicURL in "
- "the next release."),
+ "(Keystone) API v3."),
cfg.StrOpt('admin_role',
default='admin',
help="Role required to administrate keystone."),
@@ -1109,7 +1107,13 @@
'server instance? This depends on the 3.42 volume API '
'microversion and the 2.51 compute API microversion. '
'Also, not all volume or compute backends support this '
+ 'operation.'),
+ cfg.BoolOpt('extend_volume_with_snapshot',
+ default=True,
+ help='Does the cloud support extending the size of a volume '
'which has a snapshot? Some drivers do not support this '
'operation.')
+
]
diff --git a/tempest/lib/api_schema/response/volume/volumes.py b/tempest/lib/api_schema/response/volume/volumes.py
index 4f44526..900e5ef 100644
--- a/tempest/lib/api_schema/response/volume/volumes.py
+++ b/tempest/lib/api_schema/response/volume/volumes.py
@@ -295,6 +295,7 @@
attach_volume = {'status_code': [202]}
set_bootable_volume = {'status_code': [200]}
detach_volume = {'status_code': [202]}
+terminate_connection = {'status_code': [202]}
reserve_volume = {'status_code': [202]}
unreserve_volume = {'status_code': [202]}
extend_volume = {'status_code': [202]}
diff --git a/tempest/lib/services/volume/v3/attachments_client.py b/tempest/lib/services/volume/v3/attachments_client.py
index 5e448f7..303341e 100644
--- a/tempest/lib/services/volume/v3/attachments_client.py
+++ b/tempest/lib/services/volume/v3/attachments_client.py
@@ -26,3 +26,10 @@
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+
+ def delete_attachment(self, attachment_id):
+ """Delete volume attachment."""
+ url = "attachments/%s" % (attachment_id)
+ resp, body = self.delete(url)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index ad8bd71..c6f8973 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -205,14 +205,23 @@
self.validate_response(schema.set_bootable_volume, resp, body)
return rest_client.ResponseBody(resp, body)
- def detach_volume(self, volume_id):
+ def detach_volume(self, volume_id, **kwargs):
"""Detaches a volume from an instance."""
- post_body = json.dumps({'os-detach': {}})
+ post_body = json.dumps({'os-detach': kwargs})
url = 'volumes/%s/action' % (volume_id)
resp, body = self.post(url, post_body)
self.validate_response(schema.detach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
+ def terminate_connection(self, volume_id, connector):
+ """Detaches a volume from an instance using terminate_connection."""
+ post_body = json.dumps(
+ {'os-terminate_connection': {'connector': connector}})
+ url = 'volumes/%s/action' % (volume_id)
+ resp, body = self.post(url, post_body)
+ self.validate_response(schema.terminate_connection, resp, body)
+ return rest_client.ResponseBody(resp, body)
+
def reserve_volume(self, volume_id):
"""Reserves a volume."""
post_body = json.dumps({'os-reserve': {}})
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index db0aa5a..20495ee 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import os
import subprocess
@@ -89,6 +90,16 @@
volume_microversion=cls.volume_request_microversion,
placement_microversion=cls.placement_request_microversion)
+ @classmethod
+ def setup_credentials(cls):
+ # Setting these flags creates a default network with subnet, router and DHCP
+ cls.set_network_resources(
+ network=True,
+ subnet=True,
+ router=True,
+ dhcp=True)
+ super(ScenarioTest, cls).setup_credentials()
+
def setup_compute_client(cls):
"""Compute client"""
cls.compute_images_client = cls.os_primary.compute_images_client
@@ -184,7 +195,7 @@
return body['keypair']
def create_server(self, name=None, image_id=None, flavor=None,
- validatable=False, wait_until='ACTIVE',
+ validatable=None, wait_until='ACTIVE',
clients=None, **kwargs):
"""Wrapper utility that returns a test server.
@@ -309,6 +320,28 @@
kwargs.setdefault('availability_zone',
CONF.compute.compute_volume_common_az)
+ kwargs['validatable'] = bool(validatable)
+ keypair = kwargs.pop('keypair', None)
+ if wait_until == 'SSHABLE' and (
+ kwargs.get('validation_resources') is None):
+ # NOTE(danms): We should do this whether validation is enabled or
+ # not to consistently provide the resources to the
+ # create_test_server() function. If validation is disabled, then
+ # get_test_validation_resources() is basically a no-op for
+ # performance.
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ if keypair:
+ validation_resources = copy.deepcopy(validation_resources)
+ validation_resources.update(
+ keypair=keypair)
+ kwargs.update({
+ 'validatable': (validatable if validatable is not None
+ else True),
+ 'validation_resources': validation_resources})
+ if keypair:
+ kwargs.update({'key_name': keypair['name']})
+
body, _ = compute.create_test_server(
clients,
tenant_network=tenant_network,
@@ -1054,6 +1087,20 @@
floating_ip['id'])
return floating_ip
+ def get_floating_ip(self, server):
+ """Attempt to get an existing floating ip or a server
+
+ If one exists, return it, else return None
+ """
+ port_id, ip4 = self.get_server_port_id_and_ip4(server)
+ ips = self.floating_ips_client.list_floatingips(
+ floating_network_id=CONF.network.public_network_id,
+ port_id=port_id)
+ try:
+ return ips['floatingips'][0]['floating_ip_address']
+ except (KeyError, IndexError):
+ return None
+
def associate_floating_ip(self, floating_ip, server):
"""Associate floating ip to server
@@ -1148,8 +1195,14 @@
# The tests calling this method don't have a floating IP
# and can't make use of the validation resources. So the
# method is creating the floating IP there.
- return self.create_floating_ip(
- server, **kwargs)['floating_ip_address']
+ fip = self.get_floating_ip(server)
+ if fip:
+ # Already have a floating ip, so use it instead of creating
+ # another
+ return fip
+ else:
+ return self.create_floating_ip(
+ server, **kwargs)['floating_ip_address']
elif CONF.validation.connect_method == 'fixed':
# Determine the network name to look for based on config or creds
# provider network resources.
@@ -1198,7 +1251,7 @@
create_kwargs = dict({'image_id': ''})
if keypair:
- create_kwargs['key_name'] = keypair['name']
+ create_kwargs['keypair'] = keypair
if security_group:
create_kwargs['security_groups'] = [
{'name': security_group['name']}]
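
With this change, callers pass the whole keypair rather than just its name; `create_server()` then copies it into the validation resources so the SSHABLE probe authenticates with the key the server was actually booted with. A sketch:

```python
# Sketch: the keypair created here is both set as the server's key_name
# and reused by the SSHABLE liveness check.
keypair = self.create_keypair()
server = self.create_server(keypair=keypair, wait_until='SSHABLE')
```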
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 9788e19..60abc02 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -45,9 +45,7 @@
raise cls.skipException('Encrypted volume attach is not supported')
def launch_instance(self):
- keypair = self.create_keypair()
-
- return self.create_server(key_name=keypair['name'])
+ return self.create_server(wait_until='SSHABLE')
def attach_detach_volume(self, server, volume):
attached_volume = self.nova_volume_attach(server, volume)
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index e6c6eb6..2c7c085 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -270,6 +270,11 @@
new_host = self.get_host_for_server(server['id'])
self.assertNotEqual(old_host, new_host, 'Server did not migrate')
+ # we first wait until the VM replies pings again, then check the
+ # network downtime
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
downtime = downtime_meter.get_downtime()
self.assertIsNotNone(downtime)
LOG.debug("Downtime seconds measured with downtime_meter = %r",
@@ -280,9 +285,6 @@
"Downtime of {} seconds is higher than expected '{}'".format(
downtime, allowed_downtime))
- self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
-
@decorators.idempotent_id('25b188d7-0183-4b1e-a11d-15840c8e2fd6')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration is not available.')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 2a15470..3830fbc 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -49,16 +49,8 @@
def verify_ssh(self, keypair):
if self.run_ssh:
- # Obtain a floating IP if floating_ips is enabled
- if (CONF.network_feature_enabled.floating_ips and
- CONF.network.floating_network_name):
- fip = self.create_floating_ip(self.instance)
- self.ip = self.associate_floating_ip(
- fip, self.instance)['floating_ip_address']
- else:
- server = self.servers_client.show_server(
- self.instance['id'])['server']
- self.ip = self.get_server_ip(server)
+ # Obtain server IP
+ self.ip = self.get_server_ip(self.instance)
# Check ssh
self.ssh_client = self.get_remote_client(
ip_address=self.ip,
@@ -133,7 +125,8 @@
security_group = self.create_security_group()
self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
self.instance = self.create_server(
- key_name=keypair['name'],
+ keypair=keypair,
+ wait_until='SSHABLE',
security_groups=[{'name': security_group['name']}],
config_drive=CONF.compute_feature_enabled.config_drive,
metadata=self.md)
diff --git a/tempest/scenario/test_server_volume_attachment.py b/tempest/scenario/test_server_volume_attachment.py
new file mode 100644
index 0000000..cc8cf00
--- /dev/null
+++ b/tempest/scenario/test_server_volume_attachment.py
@@ -0,0 +1,189 @@
+# Copyright 2023 Red Hat
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+
+class BaseAttachmentTest(manager.ScenarioTest):
+ @classmethod
+ def setup_clients(cls):
+ super().setup_clients()
+ cls.attachments_client = cls.os_primary.attachments_client_latest
+ cls.admin_volume_client = cls.os_admin.volumes_client_latest
+
+ def _call_with_fake_service_token(self, valid_token,
+ client, method_name, *args, **kwargs):
+ """Call client method with non-service service token
+
+ Add a service token header that can be a valid normal user token (which
+ won't have the service role) or an invalid token altogether.
+ """
+ original_raw_request = client.raw_request
+
+ def raw_request(url, method, headers=None, body=None, chunked=False,
+ log_req_body=None):
+ token = headers['X-Auth-Token']
+ if not valid_token:
+ token = token[:-1] + ('a' if token[-1] != 'a' else 'b')
+ headers['X-Service-Token'] = token
+ return original_raw_request(url, method, headers=headers,
+ body=body, chunked=chunked,
+ log_req_body=log_req_body)
+
+ client_method = getattr(client, method_name)
+ with mock.patch.object(client, 'raw_request', raw_request):
+ return client_method(*args, **kwargs)
+
+
+class TestServerVolumeAttachmentScenario(BaseAttachmentTest):
+
+ """Test server attachment behaviors
+
+ This tests that volume attachments to servers may not be removed directly
+ and are only allowed through the compute service (bug #2004555).
+ """
+
+ @decorators.attr(type='slow')
+ @decorators.idempotent_id('be615530-f105-437a-8afe-ce998c9535d9')
+ @utils.services('compute', 'volume', 'image', 'network')
+ def test_server_detach_rules(self):
+ """Test that various methods of detaching a volume honors the rules"""
+ server = self.create_server(wait_until='SSHABLE')
+ servers = self.servers_client.list_servers()['servers']
+ self.assertIn(server['id'], [x['id'] for x in servers])
+
+ volume = self.create_volume()
+
+ volume = self.nova_volume_attach(server, volume)
+ self.addCleanup(self.nova_volume_detach, server, volume)
+ att_id = volume['attachments'][0]['attachment_id']
+
+ # Test user call to detach volume is rejected
+ self.assertRaises((exceptions.Forbidden, exceptions.Conflict),
+ self.volumes_client.detach_volume, volume['id'])
+
+ # Test user call to terminate connection is rejected
+ self.assertRaises((exceptions.Forbidden, exceptions.Conflict),
+ self.volumes_client.terminate_connection,
+ volume['id'], connector={})
+
+ # Test faking of service token on call to detach, force detach,
+ # terminate_connection
+ for valid_token in (True, False):
+ valid_exceptions = [exceptions.Forbidden, exceptions.Conflict]
+ if not valid_token:
+ valid_exceptions.append(exceptions.Unauthorized)
+ self.assertRaises(
+ tuple(valid_exceptions),
+ self._call_with_fake_service_token,
+ valid_token,
+ self.volumes_client,
+ 'detach_volume',
+ volume['id'])
+ self.assertRaises(
+ tuple(valid_exceptions),
+ self._call_with_fake_service_token,
+ valid_token,
+ self.volumes_client,
+ 'terminate_connection',
+ volume['id'], connector={})
+
+ # Reset volume's status to error
+ self.admin_volume_client.reset_volume_status(volume['id'],
+ status='error')
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'error')
+
+ # For the cleanup, we need to reset the volume status to in-use before
+ # the other cleanup steps try to detach it.
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.volumes_client, volume['id'], 'in-use')
+ self.addCleanup(self.admin_volume_client.reset_volume_status,
+ volume['id'], status='in-use')
+
+ # Test user call to force detach volume is rejected
+ self.assertRaises(
+ (exceptions.Forbidden, exceptions.Conflict),
+ self.admin_volume_client.force_detach_volume,
+ volume['id'], connector=None,
+ attachment_id=att_id)
+
+ # Test trying to override detach with force and service token
+ for valid_token in (True, False):
+ valid_exceptions = [exceptions.Forbidden, exceptions.Conflict]
+ if not valid_token:
+ valid_exceptions.append(exceptions.Unauthorized)
+ self.assertRaises(
+ tuple(valid_exceptions),
+ self._call_with_fake_service_token,
+ valid_token,
+ self.admin_volume_client,
+ 'force_detach_volume',
+ volume['id'], connector=None, attachment_id=att_id)
+
+ # Test user call to detach with mismatch is rejected
+ volume2 = self.create_volume()
+ volume2 = self.nova_volume_attach(server, volume2)
+ att_id2 = volume2['attachments'][0]['attachment_id']
+ self.assertRaises(
+ (exceptions.Forbidden, exceptions.BadRequest),
+ self.volumes_client.detach_volume,
+ volume['id'], attachment_id=att_id2)
+
+
+class TestServerVolumeAttachScenarioOldVersion(BaseAttachmentTest):
+ volume_min_microversion = '3.27'
+ volume_max_microversion = 'latest'
+
+ @decorators.attr(type='slow')
+ @decorators.idempotent_id('6f4d2144-99f4-495c-8b0b-c6a537971418')
+ @utils.services('compute', 'volume', 'image', 'network')
+ def test_old_versions_reject(self):
+ server = self.create_server(wait_until='SSHABLE')
+ servers = self.servers_client.list_servers()['servers']
+ self.assertIn(server['id'], [x['id'] for x in servers])
+
+ volume = self.create_volume()
+
+ volume = self.nova_volume_attach(server, volume)
+ self.addCleanup(self.nova_volume_detach, server, volume)
+ att_id = volume['attachments'][0]['attachment_id']
+
+ for valid_token in (True, False):
+ valid_exceptions = [exceptions.Forbidden,
+ exceptions.Conflict]
+ if not valid_token:
+ valid_exceptions.append(exceptions.Unauthorized)
+ self.assertRaises(
+ tuple(valid_exceptions),
+ self._call_with_fake_service_token,
+ valid_token,
+ self.attachments_client,
+ 'delete_attachment',
+ att_id)
+
+ self.assertRaises(
+ (exceptions.Forbidden, exceptions.Conflict),
+ self.attachments_client.delete_attachment,
+ att_id)
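
The helper keeps the negative matrix compact. A sketch of one cell, matching the loops above (`volume_id` is a placeholder):

```python
# Sketch: with valid_token=False the helper corrupts the X-Service-Token
# header, so Unauthorized joins the set of acceptable rejections.
self.assertRaises(
    (exceptions.Forbidden, exceptions.Conflict, exceptions.Unauthorized),
    self._call_with_fake_service_token,
    False,
    self.volumes_client,
    'detach_volume',
    volume_id)
```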
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 2e87c15..5c5033a 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -187,6 +187,7 @@
source_id=volume_origin['id'],
source_type='volume',
delete_on_termination=True,
+ wait_until='SSHABLE',
name=name)
# Create a snapshot image from the volume-backed server.
# The compute service will have the block service create a snapshot of
@@ -200,7 +201,8 @@
# disk for the server.
name = data_utils.rand_name(self.__class__.__name__ +
'-image-snapshot-server')
- instance2 = self.create_server(image_id=image['id'], name=name)
+ instance2 = self.create_server(image_id=image['id'], name=name,
+ wait_until='SSHABLE')
# Verify the server was created from the image-defined BDM.
volume_attachments = instance2['os-extended-volumes:volumes_attached']
diff --git a/tempest/test.py b/tempest/test.py
index d49458e..3360221 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -809,7 +809,7 @@
@param os_clients: Clients to be used to provision the resources.
"""
if not CONF.validation.run_validation:
- return
+ return {}
if os_clients in cls._validation_resources:
return cls._validation_resources[os_clients]
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
index 26e8079..80825a4 100644
--- a/tempest/tests/test_test.py
+++ b/tempest/tests/test_test.py
@@ -69,7 +69,7 @@
creds = fake_credentials.FakeKeystoneV3Credentials()
osclients = clients.Manager(creds)
vr = self.test_test_class.get_class_validation_resources(osclients)
- self.assertIsNone(vr)
+ self.assertEqual({}, vr)
def test_validation_resources_exists(self):
cfg.CONF.set_default('run_validation', True, 'validation')
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index 2d978c0..0ac893a 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -13,6 +13,8 @@
roles: &base_roles
- zuul: opendev.org/openstack/devstack
vars: &base_vars
+ devstack_localrc:
+ IMAGE_URLS: http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img, http://download.cirros-cloud.net/0.6.1/cirros-0.6.1-x86_64-disk.img
devstack_services:
tempest: true
devstack_local_conf:
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 233cb6c..9197e64 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -100,15 +100,6 @@
# Enable horizon so that we can run horizon tests.
horizon: true
-# TODO(gmann): As per the 2023.1 testing runtime, we need to run at least
-# one job on Focal. This job can be removed as per the future testing
-# runtime (whenever we drop the Ubuntu Focal testing).
-- job:
- name: tempest-full-ubuntu-focal
- description: This is tempest-full python3 job on Ubuntu Focal(20.04)
- parent: tempest-full-py3
- nodeset: openstack-single-node-focal
-
- job:
name: tempest-full-centos-9-stream
parent: tempest-full-py3
@@ -285,7 +276,7 @@
timeout: 10800
# This job runs on stable/stein onwards.
branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
- vars: &tempest_slow_vars
+ vars:
tox_envlist: slow-serial
devstack_localrc:
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
@@ -295,7 +286,6 @@
devstack_services:
neutron-placement: true
neutron-qos: true
- tempest_concurrency: 2
group-vars:
# NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
# the controller and subnode prior to Rocky so we have to make sure the
@@ -311,16 +301,23 @@
# as swift is ready on py3 from stable/ussuri onwards.
timeout: 10800
branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
- vars: *tempest_slow_vars
-
-- job:
- name: tempest-slow-parallel
- parent: tempest-slow-py3
- # This job run slow tests in parallel.
vars:
tox_envlist: slow
devstack_localrc:
- MYSQL_REDUCE_MEMORY: true
+ CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ group-vars:
+ # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+ # the controller and subnode prior to Rocky so we have to make sure the
+ # variable is set in both locations.
+ subnode:
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
- job:
name: tempest-cinder-v2-api
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index be8442a..3223a1e 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -28,8 +28,6 @@
- ^.mailmap$
- tempest-extra-tests:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-ubuntu-focal:
- irrelevant-files: *tempest-irrelevant-files
- glance-multistore-cinder-import:
voting: false
irrelevant-files: *tempest-irrelevant-files
@@ -40,7 +38,7 @@
# those in respective stable branch gate.
- tempest-full-2023-1:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-xena:
+ - tempest-full-yoga:
irrelevant-files: *tempest-irrelevant-files
- tempest-multinode-full-py3:
irrelevant-files: *tempest-irrelevant-files
@@ -130,8 +128,6 @@
- openstack-tox-py310
- tempest-slow-py3:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-ubuntu-focal:
- irrelevant-files: *tempest-irrelevant-files
- neutron-ovs-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-py3:
@@ -161,11 +157,9 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-all:
irrelevant-files: *tempest-irrelevant-files
- - tempest-slow-parallel
- tempest-full-parallel
- tempest-full-zed-extra-tests
- tempest-full-yoga-extra-tests
- - tempest-full-xena-extra-tests
- tempest-full-enforce-scope-new-defaults-zed
- neutron-ovs-tempest-dvr-ha-multinode-full:
irrelevant-files: *tempest-irrelevant-files
@@ -188,19 +182,15 @@
- tempest-full-2023-1
- tempest-full-zed
- tempest-full-yoga
- - tempest-full-xena
- tempest-slow-2023-1
- tempest-slow-zed
- tempest-slow-yoga
- - tempest-slow-xena
- tempest-full-2023-1-extra-tests
- tempest-full-zed-extra-tests
- tempest-full-yoga-extra-tests
- - tempest-full-xena-extra-tests
periodic:
jobs:
- tempest-all
- - tempest-slow-parallel
- tempest-full-parallel
- tempest-full-oslo-master
- tempest-stestr-master
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index c5fc063..89435ce 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -18,12 +18,6 @@
override-checkout: stable/yoga
- job:
- name: tempest-full-xena
- parent: tempest-full-py3
- nodeset: openstack-single-node-focal
- override-checkout: stable/xena
-
-- job:
name: tempest-full-2023-1-extra-tests
parent: tempest-extra-tests
nodeset: openstack-single-node-jammy
@@ -42,12 +36,6 @@
override-checkout: stable/yoga
- job:
- name: tempest-full-xena-extra-tests
- parent: tempest-extra-tests
- nodeset: openstack-single-node-focal
- override-checkout: stable/xena
-
-- job:
name: tempest-slow-2023-1
parent: tempest-slow-py3
nodeset: openstack-two-node-jammy
@@ -72,12 +60,6 @@
override-checkout: stable/yoga
- job:
- name: tempest-slow-xena
- parent: tempest-slow-py3
- nodeset: openstack-two-node-focal
- override-checkout: stable/xena
-
-- job:
name: tempest-full-py3
parent: devstack-tempest
# This job version is to use the 'full' tox env which
@@ -294,7 +276,6 @@
devstack_services:
neutron-placement: true
neutron-qos: true
- tempest_concurrency: 2
group-vars:
# NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
# the controller and subnode prior to Rocky so we have to make sure the
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index a8c29af..ca63fcc 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -93,7 +93,12 @@
vars:
devstack_localrc:
TEMPEST_USE_TEST_ACCOUNTS: True
-
+ # FIXME(gmann): Nova and Glance have enabled the new defaults and scope
+ # by default in devstack, and the pre-provisioned account code and
+ # testing need to be moved to the new RBAC design testing. Until we
+ # do that, let's run these jobs with the old defaults.
+ NOVA_ENFORCE_SCOPE: false
+ GLANCE_ENFORCE_SCOPE: false
- job:
name: tempest-full-test-account-no-admin-py3
parent: tempest-full-test-account-py3