Merge "Use MYSQL_REDUCE_MEMORY in some periodic jobs"
diff --git a/HACKING.rst b/HACKING.rst
index 17e2a49..caf954b 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -194,6 +194,13 @@
attribute should be sparingly applied to only the tests that sanity-check the
most essential functionality of an OpenStack cloud.
+Multinode Attribute
+^^^^^^^^^^^^^^^^^^^
+The ``type='multinode'`` attribute signifies that a test should be
+executed in a multinode environment. By marking tests with this
+attribute we can avoid running tests which are not particularly
+beneficial for a multinode setup, and thus reduce resource consumption.
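+
+For example, a test can be marked for multinode runs as below (an
+illustrative sketch; the test method shown is hypothetical)::
+
+    from tempest.lib import decorators
+
+    @decorators.attr(type='multinode')
+    def test_migrate_server_across_hosts(self):
+        ...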
+
Test fixtures and resources
---------------------------
Test level resources should be cleaned-up after the test execution. Clean-up
diff --git a/doc/source/keystone_scopes_and_roles_support.rst b/doc/source/keystone_scopes_and_roles_support.rst
index f446f8c..4d70565 100644
--- a/doc/source/keystone_scopes_and_roles_support.rst
+++ b/doc/source/keystone_scopes_and_roles_support.rst
@@ -203,6 +203,10 @@
cls.az_p_reader_client = (
cls.os_project_reader.availability_zone_client)
+ .. note::
+ 'primary', 'project_admin', 'project_member', and 'project_reader'
+ credentials will be created under the same project.
+
#. Project alternate Admin: This is supported and can be requested and used from
the test as below:
@@ -248,6 +252,10 @@
cls.az_p_alt_reader_client = (
cls.os_project_alt_reader.availability_zone_client)
+ .. note::
+ 'alt', 'project_alt_admin', 'project_alt_member', and
+ 'project_alt_reader' credentials will be created under the same
+ project.
+
#. Project other roles: This is supported and can be requested and used from
the test as below:
@@ -269,6 +277,16 @@
cls.az_role2_client = (
cls.os_project_my_role2.availability_zone_client)
+ .. note::
+ 'admin' credentials are considered and kept as legacy admin and
+ will be created under a new project. If a test wants an admin role
+ in projectA and a non-admin/admin role in projectB, it can request
+ the projectA admin using 'admin' or 'project_alt_admin', and the
+ non-admin in projectB using 'primary', 'project_member', or
+ 'project_reader' (or an admin in projectB using 'project_admin').
+ Many existing tests use 'admin' with a new project to assert on
+ resource lists, so we are keeping 'admin' as a kind of legacy admin.
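+
+ For example, a test that needs an admin in one project and a
+ non-admin in a different project can request credentials as below
+ (an illustrative sketch; the class and base class names are
+ hypothetical)::
+
+     class TestExample(base.BaseV2ComputeTest):
+
+         credentials = ['admin', 'primary']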
+
Pre-Provisioned Credentials
---------------------------
diff --git a/releasenotes/notes/fix-bug-1964509-b742f2c95d854980.yaml b/releasenotes/notes/fix-bug-1964509-b742f2c95d854980.yaml
new file mode 100644
index 0000000..db627de
--- /dev/null
+++ b/releasenotes/notes/fix-bug-1964509-b742f2c95d854980.yaml
@@ -0,0 +1,19 @@
+---
+fixes:
+ - |
+ There was a bug (bug#1964509) in dynamic credentials creation where
+ project credentials with different roles were created in new
+ projects. Credentials with different roles must be created within
+ the same project. For example, 'project_admin', 'project_member',
+ 'project_reader', and 'primary' credentials will be created in the
+ same project. 'alt', 'project_alt_admin', 'project_alt_member', and
+ 'project_alt_reader' credentials will be created within the same
+ project.
+
+ 'admin' credentials are considered and kept as legacy admin and
+ will be created under a new project. If a test wants an admin role
+ in projectA and a non-admin/admin role in projectB, it can request
+ the projectA admin using 'admin' or 'project_alt_admin', and the
+ non-admin in projectB using 'primary', 'project_member', or
+ 'project_reader' (or an admin in projectB using 'project_admin').
+ Many existing tests use 'admin' with a new project to assert on
+ resource lists, so we are keeping 'admin' as a kind of legacy admin.
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 1c839eb..388b9b0 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -28,10 +28,16 @@
"""Test servers API"""
create_default_network = True
+ credentials = ['primary', 'project_reader']
+
@classmethod
def setup_clients(cls):
super(ServersTestJSON, cls).setup_clients()
cls.client = cls.servers_client
+ if CONF.enforce_scope.nova:
+ cls.reader_client = cls.os_project_reader.servers_client
+ else:
+ cls.reader_client = cls.client
@decorators.idempotent_id('b92d5ec7-b1dd-44a2-87e4-45e888c46ef0')
@testtools.skipUnless(CONF.compute_feature_enabled.
@@ -64,9 +70,9 @@
id2 = server['id']
self.addCleanup(self.delete_server, id2)
self.assertNotEqual(id1, id2, "Did not create a new server")
- server = self.client.show_server(id1)['server']
+ server = self.reader_client.show_server(id1)['server']
name1 = server['name']
- server = self.client.show_server(id2)['server']
+ server = self.reader_client.show_server(id2)['server']
name2 = server['name']
self.assertEqual(name1, name2)
@@ -80,7 +86,7 @@
server = self.create_test_server(key_name=key_name,
wait_until='ACTIVE')
self.addCleanup(self.delete_server, server['id'])
- server = self.client.show_server(server['id'])['server']
+ server = self.reader_client.show_server(server['id'])['server']
self.assertEqual(key_name, server['key_name'])
def _update_server_name(self, server_id, status, prefix_name='server'):
@@ -93,7 +99,7 @@
waiters.wait_for_server_status(self.client, server_id, status)
# Verify the name of the server has changed
- server = self.client.show_server(server_id)['server']
+ server = self.reader_client.show_server(server_id)['server']
self.assertEqual(new_name, server['name'])
return server
@@ -128,7 +134,7 @@
waiters.wait_for_server_status(self.client, server['id'], 'ACTIVE')
# Verify the access addresses have been updated
- server = self.client.show_server(server['id'])['server']
+ server = self.reader_client.show_server(server['id'])['server']
self.assertEqual('1.1.1.1', server['accessIPv4'])
self.assertEqual('::babe:202:202', server['accessIPv6'])
@@ -138,7 +144,7 @@
server = self.create_test_server(accessIPv6='2001:2001::3',
wait_until='ACTIVE')
self.addCleanup(self.delete_server, server['id'])
- server = self.client.show_server(server['id'])['server']
+ server = self.reader_client.show_server(server['id'])['server']
self.assertEqual('2001:2001::3', server['accessIPv6'])
@decorators.related_bug('1730756')
@@ -169,12 +175,22 @@
# also. 2.47 APIs schema are on top of 2.9->2.19->2.26 schema so
# below tests cover all of the schema.
+ credentials = ['primary', 'project_reader']
+
+ @classmethod
+ def setup_clients(cls):
+ super(ServerShowV247Test, cls).setup_clients()
+ if CONF.enforce_scope.nova:
+ cls.reader_client = cls.os_project_reader.servers_client
+ else:
+ cls.reader_client = cls.servers_client
+
@decorators.idempotent_id('88b0bdb2-494c-11e7-a919-92ebcb67fe33')
def test_show_server(self):
"""Test getting server detail"""
server = self.create_test_server()
# All fields will be checked by API schema
- self.servers_client.show_server(server['id'])
+ self.reader_client.show_server(server['id'])
@decorators.idempotent_id('8de397c2-57d0-4b90-aa30-e5d668f21a8b')
def test_update_rebuild_list_server(self):
@@ -198,6 +214,16 @@
min_microversion = '2.63'
max_microversion = 'latest'
+ credentials = ['primary', 'project_reader']
+
+ @classmethod
+ def setup_clients(cls):
+ super(ServerShowV263Test, cls).setup_clients()
+ if CONF.enforce_scope.nova:
+ cls.reader_client = cls.os_project_reader.servers_client
+ else:
+ cls.reader_client = cls.servers_client
+
@testtools.skipUnless(CONF.compute.certified_image_ref,
'``[compute]/certified_image_ref`` required to test '
'image certificate validation.')
@@ -214,7 +240,7 @@
wait_until='ACTIVE')
# Check show API response schema
- self.servers_client.show_server(server['id'])['server']
+ self.reader_client.show_server(server['id'])['server']
# Check update API response schema
self.servers_client.update_server(server['id'])
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 1d05f13..b723977 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -819,21 +819,8 @@
# Add a new location
new_loc = {'metadata': {'foo': 'bar'},
'url': CONF.image.http_image}
-
- # NOTE(danms): If glance was unable to fetch the remote image via
- # HTTP, it will return BadRequest. Because this can be transient in
- # CI, we try this a few times before we agree that it has failed
- # for a reason worthy of failing the test.
- for i in range(BAD_REQUEST_RETRIES):
- try:
- self.client.update_image(image['id'], [
- dict(add='/locations/-', value=new_loc)])
- break
- except lib_exc.BadRequest:
- if i + 1 == BAD_REQUEST_RETRIES:
- raise
- else:
- time.sleep(1)
+ self._update_image_with_retries(image['id'], [
+ dict(add='/locations/-', value=new_loc)])
# The image should now be active, with one location that looks
# like we expect
@@ -858,20 +845,14 @@
def test_set_location(self):
self._check_set_location()
- def _check_set_multiple_locations(self):
- image = self._check_set_location()
-
- new_loc = {'metadata': {'speed': '88mph'},
- 'url': '%s#new' % CONF.image.http_image}
-
+ def _update_image_with_retries(self, image, patch):
# NOTE(danms): If glance was unable to fetch the remote image via
# HTTP, it will return BadRequest. Because this can be transient in
# CI, we try this a few times before we agree that it has failed
# for a reason worthy of failing the test.
for i in range(BAD_REQUEST_RETRIES):
try:
- self.client.update_image(image['id'], [
- dict(add='/locations/-', value=new_loc)])
+ self.client.update_image(image, patch)
break
except lib_exc.BadRequest:
if i + 1 == BAD_REQUEST_RETRIES:
@@ -879,6 +860,15 @@
else:
time.sleep(1)
+ def _check_set_multiple_locations(self):
+ image = self._check_set_location()
+
+ new_loc = {'metadata': {'speed': '88mph'},
+ 'url': '%s#new' % CONF.image.http_image}
+ self._update_image_with_retries(image['id'],
+ [dict(add='/locations/-',
+ value=new_loc)])
+
# The image should now have two locations and the last one
# (locations are ordered) should have the new URL.
image = self.client.show_image(image['id'])
@@ -989,8 +979,9 @@
'os_hash_algo': 'sha512'},
'metadata': {},
'url': CONF.image.http_image}
- self.client.update_image(image['id'], [
- dict(add='/locations/-', value=new_loc)])
+ self._update_image_with_retries(image['id'],
+ [dict(add='/locations/-',
+ value=new_loc)])
# Expect that all of our values ended up on the image
image = self.client.show_image(image['id'])
@@ -1017,8 +1008,9 @@
'os_hash_algo': orig_image['os_hash_algo']},
'metadata': {},
'url': '%s#new' % CONF.image.http_image}
- self.client.update_image(orig_image['id'], [
- dict(add='/locations/-', value=new_loc)])
+ self._update_image_with_retries(orig_image['id'],
+ [dict(add='/locations/-',
+ value=new_loc)])
# Setting the same exact values on a new location should work
image = self.client.show_image(orig_image['id'])
@@ -1052,17 +1044,17 @@
# This should always fail due to the mismatch
self.assertRaises(lib_exc.Conflict,
- self.client.update_image,
- orig_image['id'], [
- dict(add='/locations/-', value=new_loc)])
+ self._update_image_with_retries,
+ orig_image['id'],
+ [dict(add='/locations/-', value=new_loc)])
# Now try to add a new location with all of the substitutions,
# which should also fail
new_loc['validation_data'] = values
self.assertRaises(lib_exc.Conflict,
- self.client.update_image,
- orig_image['id'], [
- dict(add='/locations/-', value=new_loc)])
+ self._update_image_with_retries,
+ orig_image['id'],
+ [dict(add='/locations/-', value=new_loc)])
# Make sure nothing has changed on our image after all the
# above failures
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 49f9e22..9ba9949 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -20,6 +20,7 @@
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib.decorators import cleanup_order
+from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
@@ -126,12 +127,32 @@
volume = self.volumes_client.create_volume(**kwargs)['volume']
self.cleanup(test_utils.call_and_ignore_notfound_exc,
- self.delete_volume, self.volumes_client, volume['id'])
+ self._delete_volume_for_cleanup,
+ self.volumes_client, volume['id'])
if wait_until:
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], wait_until)
return volume
+ @staticmethod
+ def _delete_volume_for_cleanup(volumes_client, volume_id):
+ """Delete a volume (only) for cleanup.
+
+ If it is attached to a server, wait for it to become available,
+ assuming we have already deleted the server and just need nova to
+ complete the delete operation before the volume becomes available
+ to be deleted. Then proceed to the regular delete_volume().
+ """
+ try:
+ vol = volumes_client.show_volume(volume_id)['volume']
+ if vol['status'] == 'in-use':
+ waiters.wait_for_volume_resource_status(volumes_client,
+ volume_id,
+ 'available')
+ except lib_exc.NotFound:
+ pass
+ BaseVolumeTest.delete_volume(volumes_client, volume_id)
+
@cleanup_order
def create_snapshot(self, volume_id=1, **kwargs):
"""Wrapper utility that returns a test snapshot."""
@@ -183,15 +204,17 @@
snapshots_client.delete_snapshot(snapshot_id)
snapshots_client.wait_for_resource_deletion(snapshot_id)
- def attach_volume(self, server_id, volume_id):
+ def attach_volume(self, server_id, volume_id, wait_for_detach=True):
"""Attach a volume to a server"""
self.servers_client.attach_volume(
server_id, volumeId=volume_id,
device='/dev/%s' % CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
volume_id, 'in-use')
- self.addCleanup(waiters.wait_for_volume_resource_status,
- self.volumes_client, volume_id, 'available')
+ if wait_for_detach:
+ self.addCleanup(waiters.wait_for_volume_resource_status,
+ self.volumes_client, volume_id, 'available',
+ server_id, self.servers_client)
self.addCleanup(self.servers_client.detach_volume, server_id,
volume_id)
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index b3a04f8..95521e7 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -44,12 +44,17 @@
@utils.services('compute')
def test_snapshot_create_delete_with_volume_in_use(self):
"""Test create/delete snapshot from volume attached to server"""
- # Create a test instance
- server = self.create_server(wait_until='SSHABLE')
# NOTE(zhufl) Here we create volume from self.image_ref for adding
# coverage for "creating snapshot from non-blank volume".
volume = self.create_volume(imageRef=self.image_ref)
- self.attach_volume(server['id'], volume['id'])
+
+ # Create a test instance
+ server = self.create_server(wait_until='SSHABLE')
+
+ # NOTE(danms): We are attaching this volume to a server, but we do
+ # not need to block on detach during cleanup because we will be
+ # deleting the server anyway.
+ self.attach_volume(server['id'], volume['id'], wait_for_detach=False)
# Snapshot a volume which attached to an instance with force=False
self.assertRaises(lib_exc.BadRequest, self.create_snapshot,
@@ -81,7 +86,11 @@
# Create a server and attach it
server = self.create_server(wait_until='SSHABLE')
- self.attach_volume(server['id'], self.volume_origin['id'])
+ # NOTE(danms): We are attaching this volume to a server, but we do
+ # not need to block on detach during cleanup because we will be
+ # deleting the server anyway.
+ self.attach_volume(server['id'], self.volume_origin['id'],
+ wait_for_detach=False)
# Now that the volume is attached, create other snapshots
snapshot2 = self.create_snapshot(self.volume_origin['id'], force=True)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 45a7b8a..c5da412 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -303,12 +303,16 @@
raise lib_exc.TimeoutException(message)
-def wait_for_volume_resource_status(client, resource_id, status):
+def wait_for_volume_resource_status(client, resource_id, status,
+ server_id=None, servers_client=None):
"""Waits for a volume resource to reach a given status.
This function is a common function for volume, snapshot and backup
resources. The function extracts the name of the desired resource from
the client class name of the resource.
+
+ If server_id and servers_client are provided, dump the console for that
+ server on failure.
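+
+    For example (an illustrative call)::
+
+        waiters.wait_for_volume_resource_status(
+            volumes_client, volume['id'], 'available',
+            server_id=server['id'], servers_client=servers_client)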
"""
resource_name = re.findall(
r'(volume|group-snapshot|snapshot|backup|group)',
@@ -330,6 +334,11 @@
raise exceptions.VolumeExtendErrorException(volume_id=resource_id)
if int(time.time()) - start >= client.build_timeout:
+ if server_id and servers_client:
+ console_output = servers_client.get_console_output(
+ server_id)['output']
+ LOG.debug('Console output for %s\nbody=\n%s',
+ server_id, console_output)
message = ('%s %s failed to reach %s status (current %s) '
'within the required time (%s s).' %
(resource_name, resource_id, status, resource_status,
diff --git a/tempest/config.py b/tempest/config.py
index 00b394e..dfc0a8e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -975,12 +975,12 @@
default='ecdsa',
help='Type of key to use for ssh connections. '
'Valid types are rsa, ecdsa'),
- cfg.IntOpt('allowed_network_downtime',
- default=5.0,
- help="Allowed VM network connection downtime during live "
- "migration, in seconds. "
- "When the measured downtime exceeds this value, an "
- "exception is raised."),
+ cfg.FloatOpt('allowed_network_downtime',
+ default=5.0,
+ help="Allowed VM network connection downtime during live "
+ "migration, in seconds. "
+ "When the measured downtime exceeds this value, an "
+ "exception is raised."),
]
volume_group = cfg.OptGroup(name='volume',
diff --git a/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py b/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
index 8aed37d..b36c9d6 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
@@ -30,7 +30,7 @@
mac_address = {
'type': 'string',
- 'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ 'pattern': '(?:[a-fA-F0-9]{2}:){5}[a-fA-F0-9]{2}'
}
ip_address = {
diff --git a/tempest/lib/common/cred_client.py b/tempest/lib/common/cred_client.py
index f13d6d0..69798a4 100644
--- a/tempest/lib/common/cred_client.py
+++ b/tempest/lib/common/cred_client.py
@@ -58,6 +58,10 @@
def create_project(self, name, description):
pass
+ @abc.abstractmethod
+ def show_project(self, project_id):
+ pass
+
def _check_role_exists(self, role_name):
try:
roles = self._list_roles()
@@ -118,6 +122,9 @@
name=name, description=description)['tenant']
return tenant
+ def show_project(self, project_id):
+ return self.projects_client.show_tenant(project_id)['tenant']
+
def delete_project(self, project_id):
self.projects_client.delete_tenant(project_id)
@@ -159,6 +166,9 @@
domain_id=self.creds_domain['id'])['project']
return project
+ def show_project(self, project_id):
+ return self.projects_client.show_project(project_id)['project']
+
def delete_project(self, project_id):
self.projects_client.delete_project(project_id)
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index d687eb5..99647d4 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -163,7 +163,8 @@
os.network.PortsClient(),
os.network.SecurityGroupsClient())
- def _create_creds(self, admin=False, roles=None, scope='project'):
+ def _create_creds(self, admin=False, roles=None, scope='project',
+ project_id=None):
"""Create credentials with random name.
Creates user and role assignments on a project, domain, or system. When
@@ -177,6 +178,8 @@
:type roles: list
:param str scope: The scope for the role assignment, may be one of
'project', 'domain', or 'system'.
+ :param str project_id: The id of an already created project. If
+ provided, the credentials are created under this existing project
+ instead of a new one.
:return: Readonly Credentials with network resources
:raises: Exception if scope is invalid
"""
@@ -190,12 +193,20 @@
'system': None
}
if scope == 'project':
- project_name = data_utils.rand_name(
- root, prefix=self.resource_prefix)
- project_desc = project_name + '-desc'
- project = self.creds_client.create_project(
- name=project_name, description=project_desc)
-
+ if not project_id:
+ project_name = data_utils.rand_name(
+ root, prefix=self.resource_prefix)
+ project_desc = project_name + '-desc'
+ project = self.creds_client.create_project(
+ name=project_name, description=project_desc)
+ else:
+ # NOTE(gmann) This is the case where creds are requested
+ # within the same project as the existing creds. We should
+ # not create a new project in this case.
+ project = self.creds_client.show_project(project_id)
+ project_name = project['name']
+ LOG.info("Using the existing project %s for scope %s and "
+ "roles: %s", project['id'], scope, roles)
# NOTE(andreaf) User and project can be distinguished from the
# context, having the same ID in both makes it easier to match them
# and debug.
@@ -372,48 +383,78 @@
self.routers_admin_client.add_router_interface(router_id,
subnet_id=subnet_id)
- def get_credentials(self, credential_type, scope=None):
- if not scope and self._creds.get(str(credential_type)):
- credentials = self._creds[str(credential_type)]
- elif scope and (
- self._creds.get("%s_%s" % (scope, str(credential_type)))):
- credentials = self._creds["%s_%s" % (scope, str(credential_type))]
+ def _get_project_id(self, credential_type, scope):
+ same_creds = [['admin'], ['member'], ['reader']]
+ same_alt_creds = [['alt_admin'], ['alt_member'], ['alt_reader']]
+ search_in = []
+ if credential_type in same_creds:
+ search_in = same_creds
+ elif credential_type in same_alt_creds:
+ search_in = same_alt_creds
+ for cred in search_in:
+ found_cred = self._creds.get("%s_%s" % (scope, str(cred)))
+ if found_cred:
+ project_id = found_cred.get("%s_%s" % (scope, 'id'))
+ LOG.debug("Reusing existing project %s from creds: %s ",
+ project_id, found_cred)
+ return project_id
+ return None
+
+ def get_credentials(self, credential_type, scope=None, by_role=False):
+ cred_prefix = ''
+ if by_role:
+ cred_prefix = 'role_'
+ if not scope and self._creds.get(
+ "%s%s" % (cred_prefix, str(credential_type))):
+ credentials = self._creds[
+ "%s%s" % (cred_prefix, str(credential_type))]
+ elif scope and (self._creds.get(
+ "%s%s_%s" % (cred_prefix, scope, str(credential_type)))):
+ credentials = self._creds[
+ "%s%s_%s" % (cred_prefix, scope, str(credential_type))]
else:
LOG.debug("Creating new dynamic creds for scope: %s and "
"credential_type: %s", scope, credential_type)
+ project_id = None
if scope:
- if credential_type in [['admin'], ['alt_admin']]:
+ if scope == 'project':
+ project_id = self._get_project_id(
+ credential_type, 'project')
+ if by_role:
credentials = self._create_creds(
- admin=True, scope=scope)
+ roles=credential_type, scope=scope)
+ elif credential_type in [['admin'], ['alt_admin']]:
+ credentials = self._create_creds(
+ admin=True, scope=scope, project_id=project_id)
elif credential_type in [['alt_member'], ['alt_reader']]:
cred_type = credential_type[0][4:]
if isinstance(cred_type, str):
cred_type = [cred_type]
credentials = self._create_creds(
- roles=cred_type, scope=scope)
- else:
+ roles=cred_type, scope=scope, project_id=project_id)
+ elif credential_type in [['member'], ['reader']]:
credentials = self._create_creds(
- roles=credential_type, scope=scope)
+ roles=credential_type, scope=scope,
+ project_id=project_id)
elif credential_type in ['primary', 'alt', 'admin']:
is_admin = (credential_type == 'admin')
credentials = self._create_creds(admin=is_admin)
else:
credentials = self._create_creds(roles=credential_type)
if scope:
- self._creds["%s_%s" %
- (scope, str(credential_type))] = credentials
+ self._creds["%s%s_%s" % (
+ cred_prefix, scope, str(credential_type))] = credentials
else:
- self._creds[str(credential_type)] = credentials
+ self._creds[
+ "%s%s" % (cred_prefix, str(credential_type))] = credentials
# Maintained until tests are ported
LOG.info("Acquired dynamic creds:\n"
" credentials: %s", credentials)
# NOTE(gmann): For 'domain' and 'system' scoped token, there is no
# project_id so we are skipping the network creation for both
- # scope. How these scoped token can create the network, Nova
- # server or other project mapped resources is one of the open
- # question and discussed a lot in Xena cycle PTG. Once we sort
- # out that then if needed we can update the network creation here.
- if (not scope or scope == 'project'):
+ # scope.
+ # We need to create network resources only once per project.
+ if (not project_id and (not scope or scope == 'project')):
if (self.neutron_available and self.create_networks):
network, subnet, router = self._create_network_resources(
credentials.tenant_id)
@@ -422,24 +463,22 @@
LOG.info("Created isolated network resources for:\n"
" credentials: %s", credentials)
else:
- LOG.info("Network resources are not created for scope: %s",
- scope)
+ LOG.info("Network resources are not created for requested "
+ "scope: %s and credentials: %s", scope, credentials)
return credentials
# TODO(gmann): Remove this method in favor of get_project_member_creds()
# after the deprecation phase.
def get_primary_creds(self):
- return self.get_credentials('primary')
+ return self.get_project_member_creds()
- # TODO(gmann): Remove this method in favor of get_project_admin_creds()
- # after the deprecation phase.
def get_admin_creds(self):
return self.get_credentials('admin')
- # TODO(gmann): Replace this method with more appropriate name.
- # like get_project_alt_member_creds()
+ # TODO(gmann): Remove this method in favor of
+ # get_project_alt_member_creds() after the deprecation phase.
def get_alt_creds(self):
- return self.get_credentials('alt')
+ return self.get_project_alt_member_creds()
def get_system_admin_creds(self):
return self.get_credentials(['admin'], scope='system')
@@ -481,9 +520,9 @@
roles = list(set(roles))
# The roles list as a str will become the index as the dict key for
# the created credentials set in the dynamic_creds dict.
- creds_name = str(roles)
+ creds_name = "role_%s" % str(roles)
if scope:
- creds_name = "%s_%s" % (scope, str(roles))
+ creds_name = "role_%s_%s" % (scope, str(roles))
exist_creds = self._creds.get(creds_name)
# If force_new flag is True 2 cred sets with the same roles are needed
# handle this by creating a separate index for old one to store it
@@ -492,7 +531,7 @@
new_index = creds_name + '-' + str(len(self._creds))
self._creds[new_index] = exist_creds
del self._creds[creds_name]
- return self.get_credentials(roles, scope=scope)
+ return self.get_credentials(roles, scope=scope, by_role=True)
def _clear_isolated_router(self, router_id, router_name):
client = self.routers_admin_client
@@ -553,31 +592,20 @@
if not self._creds:
return
self._clear_isolated_net_resources()
+ project_ids = set()
for creds in self._creds.values():
+ # NOTE(gmann): With the new RBAC personas, we can have a single
+ # project and multiple users created under it; to avoid conflicts
+ # let's clean up the projects at the end.
+ # Add the project id only if it is not None, which leaves domain
+ # and system creds out.
+ if creds.project_id:
+ project_ids.add(creds.project_id)
try:
self.creds_client.delete_user(creds.user_id)
except lib_exc.NotFound:
LOG.warning("user with name: %s not found for delete",
creds.username)
- if creds.tenant_id:
- # NOTE(zhufl): Only when neutron's security_group ext is
- # enabled, cleanup_default_secgroup will not raise error. But
- # here cannot use test_utils.is_extension_enabled for it will
- # cause "circular dependency". So here just use try...except to
- # ensure tenant deletion without big changes.
- try:
- if self.neutron_available:
- self.cleanup_default_secgroup(
- self.security_groups_admin_client, creds.tenant_id)
- except lib_exc.NotFound:
- LOG.warning("failed to cleanup tenant %s's secgroup",
- creds.tenant_name)
- try:
- self.creds_client.delete_project(creds.tenant_id)
- except lib_exc.NotFound:
- LOG.warning("tenant with name: %s not found for delete",
- creds.tenant_name)
-
# if cred is domain scoped, delete ephemeral domain
# do not delete default domain
if (hasattr(creds, 'domain_id') and
@@ -587,6 +615,28 @@
except lib_exc.NotFound:
LOG.warning("domain with name: %s not found for delete",
creds.domain_name)
+ for project_id in project_ids:
+ # NOTE(zhufl): Only when neutron's security_group ext is
+ # enabled will cleanup_default_secgroup not raise an error. But
+ # we cannot use test_utils.is_extension_enabled here as it would
+ # cause a "circular dependency". So we just use try...except to
+ # ensure tenant deletion without big changes.
+ LOG.info("Deleting project and security group for project: %s",
+ project_id)
+
+ try:
+ if self.neutron_available:
+ self.cleanup_default_secgroup(
+ self.security_groups_admin_client, project_id)
+ except lib_exc.NotFound:
+ LOG.warning("failed to cleanup tenant %s's secgroup",
+ project_id)
+ try:
+ self.creds_client.delete_project(project_id)
+ except lib_exc.NotFound:
+ LOG.warning("tenant with id: %s not found for delete",
+ project_id)
+
self._creds = {}
def is_multi_user(self):
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f4f37b0..e6c6eb6 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -275,7 +275,7 @@
LOG.debug("Downtime seconds measured with downtime_meter = %r",
downtime)
allowed_downtime = CONF.validation.allowed_network_downtime
- self.assertLess(
+ self.assertLessEqual(
downtime, allowed_downtime,
"Downtime of {} seconds is higher than expected '{}'".format(
downtime, allowed_downtime))
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 2695048..93c949e 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -386,6 +386,29 @@
mock_sleep.assert_called_once_with(1)
@mock.patch.object(time, 'sleep')
+ def test_wait_for_volume_status_timeout_console(self, mock_sleep):
+ # Tests that the wait method gets the server console log if the
+ # timeout is hit.
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1,
+ build_timeout=1)
+ servers_client = mock.Mock()
+ servers_client.get_console_output.return_value = {
+ 'output': 'console log'}
+ volume = {'volume': {'status': 'detaching'}}
+ mock_show = mock.Mock(return_value=volume)
+ client.show_volume = mock_show
+ volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_volume_resource_status,
+ client, volume_id, 'available',
+ server_id='someserver',
+ servers_client=servers_client)
+ servers_client.get_console_output.assert_called_once_with(
+ 'someserver')
+
+ @mock.patch.object(time, 'sleep')
def test_wait_for_volume_status_error_extending(self, mock_sleep):
# Tests that the wait method raises VolumeExtendErrorException if
# the volume status is 'error_extending'.
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index b4b1b91..d3d01c0 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -60,6 +60,7 @@
fake_response = fake_identity._fake_v2_response
tenants_client_class = tenants_client.TenantsClient
delete_tenant = 'delete_tenant'
+ create_tenant = 'create_tenant'
def setUp(self):
super(TestDynamicCredentialProvider, self).setUp()
@@ -140,7 +141,9 @@
return_value=(rest_client.ResponseBody
(200, {'roles': [
{'id': '1', 'name': 'FakeRole'},
- {'id': '2', 'name': 'member'}]}))))
+ {'id': '2', 'name': 'member'},
+ {'id': '3', 'name': 'reader'},
+ {'id': '4', 'name': 'admin'}]}))))
return roles_fix
def _mock_list_ec2_credentials(self, user_id, tenant_id):
@@ -191,6 +194,205 @@
self.assertEqual(primary_creds.tenant_id, '1234')
self.assertEqual(primary_creds.user_id, '1234')
+ def _request_and_check_second_creds(
+ self, creds_obj, func, creds_to_compare,
+ show_mock, sm_count=1, sm_count_in_diff_project=0,
+ same_project_request=True, **func_kwargs):
+ self._mock_user_create('111', 'fake_user')
+ with mock.patch.object(creds_obj.creds_client,
+ 'create_project') as create_mock:
+ create_mock.return_value = {'id': '22', 'name': 'fake_project'}
+ new_creds = func(**func_kwargs)
+ if same_project_request:
+ # Check that with the second creds request, create_project is not
+ # called and show_project is called, which means no new project is
+ # created for the second requested creds; instead a new user is
+ # created under the existing project.
+ self.assertEqual(len(create_mock.mock_calls), 0)
+ self.assertEqual(len(show_mock.mock_calls), sm_count)
+ # Verify the project name and id are the same as creds_to_compare
+ self.assertEqual(creds_to_compare.tenant_name,
+ new_creds.tenant_name)
+ self.assertEqual(creds_to_compare.tenant_id,
+ new_creds.tenant_id)
+ else:
+ # Check that with a different-project creds request, create_project
+ # is called and show_project is not called, which means a new
+ # project is created for this creds request.
+ self.assertEqual(len(create_mock.mock_calls), 1)
+ self.assertEqual(len(show_mock.mock_calls),
+ sm_count_in_diff_project)
+ # Verify the project name and id are not the same as creds_to_compare
+ self.assertNotEqual(creds_to_compare.tenant_name,
+ new_creds.tenant_name)
+ self.assertNotEqual(creds_to_compare.tenant_id,
+ new_creds.tenant_id)
+ self.assertEqual(new_creds.tenant_name, 'fake_project')
+ self.assertEqual(new_creds.tenant_id, '22')
+ # Verify new user name and id
+ self.assertEqual(new_creds.username, 'fake_user')
+ self.assertEqual(new_creds.user_id, '111')
+ return new_creds
+
+ @mock.patch('tempest.lib.common.rest_client.RestClient')
+ def _creds_within_same_project(self, MockRestClient, test_alt_creds=False):
+ creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ if test_alt_creds:
+ admin_func = creds.get_project_alt_admin_creds
+ member_func = creds.get_project_alt_member_creds
+ reader_func = creds.get_project_alt_reader_creds
+ else:
+ admin_func = creds.get_project_admin_creds
+ member_func = creds.get_project_member_creds
+ reader_func = creds.get_project_reader_creds
+ self._mock_assign_user_role()
+ self._mock_list_role()
+ self._mock_user_create('11', 'fake_user1')
+ show_mock = self.patchobject(creds.creds_client, 'show_project')
+ show_mock.return_value = {'id': '21', 'name': 'fake_project1'}
+ with mock.patch.object(creds.creds_client,
+ 'create_project') as create_mock:
+ create_mock.return_value = {'id': '21', 'name': 'fake_project1'}
+ member_creds = member_func()
+ # Check that with the first creds request, create_project is called
+ # and show_project is not called, which means a new project is
+ # created for the requested creds.
+ self.assertEqual(len(create_mock.mock_calls), 1)
+ self.assertEqual(len(show_mock.mock_calls), 0)
+ # Verify project, user name and IDs
+ self.assertEqual(member_creds.username, 'fake_user1')
+ self.assertEqual(member_creds.tenant_name, 'fake_project1')
+ self.assertEqual(member_creds.tenant_id, '21')
+ self.assertEqual(member_creds.user_id, '11')
+
+ # Now request the project reader creds, which should not create a
+ # new project but instead use the project_id of the project already
+ # created for member_creds.
+ self._request_and_check_second_creds(
+ creds, reader_func, member_creds, show_mock)
+
+ # Now request the project admin creds, which should not create a
+ # new project but instead use the project_id of the project already
+ # created for member_creds.
+ self._request_and_check_second_creds(
+ creds, admin_func, member_creds, show_mock, sm_count=2)
+
+ def test_creds_within_same_project(self):
+ self._creds_within_same_project()
+
+ def test_alt_creds_within_same_project(self):
+ self._creds_within_same_project(test_alt_creds=True)
+
+ @mock.patch('tempest.lib.common.rest_client.RestClient')
+ def test_creds_in_different_project(self, MockRestClient):
+ creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ self._mock_assign_user_role()
+ self._mock_list_role()
+ self._mock_user_create('11', 'fake_user1')
+ show_mock = self.patchobject(creds.creds_client, 'show_project')
+ show_mock.return_value = {'id': '21', 'name': 'fake_project1'}
+ with mock.patch.object(creds.creds_client,
+ 'create_project') as create_mock:
+ create_mock.return_value = {'id': '21', 'name': 'fake_project1'}
+ member_creds = creds.get_project_member_creds()
+ # Check that with the first creds request, create_project is called
+ # and show_project is not called, which means a new project is
+ # created for the requested creds.
+ self.assertEqual(len(create_mock.mock_calls), 1)
+ self.assertEqual(len(show_mock.mock_calls), 0)
+ # Verify project, user name and IDs
+ self.assertEqual(member_creds.username, 'fake_user1')
+ self.assertEqual(member_creds.tenant_name, 'fake_project1')
+ self.assertEqual(member_creds.tenant_id, '21')
+ self.assertEqual(member_creds.user_id, '11')
+
+ # Now request the project alt reader creds, which should create a
+ # new project as this request is for alt creds.
+ alt_reader_creds = self._request_and_check_second_creds(
+ creds, creds.get_project_alt_reader_creds,
+ member_creds, show_mock, same_project_request=False)
+
+ # Check that with the second creds request, create_project is not
+ # called and show_project is called, which means no new project is
+ # created for the second requested creds; instead a new user is
+ # created under the existing project.
+ self._request_and_check_second_creds(
+ creds, creds.get_project_reader_creds, member_creds, show_mock)
+
+ # Now request the project alt member creds, which should not create
+ # a new project but instead use the alt project already created for
+ # the alt_reader creds.
+ show_mock.return_value = {
+ 'id': alt_reader_creds.tenant_id,
+ 'name': alt_reader_creds.tenant_name}
+ self._request_and_check_second_creds(
+ creds, creds.get_project_alt_member_creds,
+ alt_reader_creds, show_mock, sm_count=2,
+ same_project_request=True)
+
+ @mock.patch('tempest.lib.common.rest_client.RestClient')
+ def test_creds_by_role_in_different_project(self, MockRestClient):
+ creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ self._mock_assign_user_role()
+ self._mock_list_role()
+ self._mock_user_create('11', 'fake_user1')
+ show_mock = self.patchobject(creds.creds_client, 'show_project')
+ show_mock.return_value = {'id': '21', 'name': 'fake_project1'}
+ with mock.patch.object(creds.creds_client,
+ 'create_project') as create_mock:
+ create_mock.return_value = {'id': '21', 'name': 'fake_project1'}
+ member_creds = creds.get_project_member_creds()
+ # Check that with the first creds request, create_project is called
+ # and show_project is not called, which means a new project is
+ # created for the requested creds.
+ self.assertEqual(len(create_mock.mock_calls), 1)
+ self.assertEqual(len(show_mock.mock_calls), 0)
+ # Verify project, user name and IDs
+ self.assertEqual(member_creds.username, 'fake_user1')
+ self.assertEqual(member_creds.tenant_name, 'fake_project1')
+ self.assertEqual(member_creds.tenant_id, '21')
+ self.assertEqual(member_creds.user_id, '11')
+ # Check that with the second creds request, create_project is not
+ # called and show_project is called, which means no new project is
+ # created for the second requested creds; instead a new user is
+ # created under the existing project.
+ self._request_and_check_second_creds(
+ creds, creds.get_project_reader_creds, member_creds, show_mock)
+ # Now request the creds by role, which should create a new project.
+ self._request_and_check_second_creds(
+ creds, creds.get_creds_by_roles, member_creds, show_mock,
+ sm_count_in_diff_project=1, same_project_request=False,
+ roles=['member'], scope='project')
+
+ @mock.patch('tempest.lib.common.rest_client.RestClient')
+ def test_legacy_admin_creds_in_different_project(self, MockRestClient):
+ creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+ self._mock_assign_user_role()
+ self._mock_list_role()
+ self._mock_user_create('11', 'fake_user1')
+ show_mock = self.patchobject(creds.creds_client, 'show_project')
+ show_mock.return_value = {'id': '21', 'name': 'fake_project1'}
+ with mock.patch.object(creds.creds_client,
+ 'create_project') as create_mock:
+ create_mock.return_value = {'id': '21', 'name': 'fake_project1'}
+ member_creds = creds.get_project_member_creds()
+ # Check that with the first creds request, create_project is called
+ # and show_project is not called, which means a new project is
+ # created for the requested creds.
+ self.assertEqual(len(create_mock.mock_calls), 1)
+ self.assertEqual(len(show_mock.mock_calls), 0)
+ # Verify project, user name and IDs
+ self.assertEqual(member_creds.username, 'fake_user1')
+ self.assertEqual(member_creds.tenant_name, 'fake_project1')
+ self.assertEqual(member_creds.tenant_id, '21')
+ self.assertEqual(member_creds.user_id, '11')
+
+ # Now request the legacy admin creds, which should create a new
+ # project instead of using the project of the member creds.
+ self._request_and_check_second_creds(
+ creds, creds.get_admin_creds,
+ member_creds, show_mock, same_project_request=False)
+
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_admin_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
@@ -321,7 +523,8 @@
@mock.patch('tempest.lib.common.rest_client.RestClient')
def _test_get_same_role_creds_with_project_scope(self, MockRestClient,
- scope=None):
+ scope=None,
+ force_new=False):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_list_2_roles()
self._mock_user_create('1234', 'fake_role_user')
@@ -329,7 +532,7 @@
with mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project') as user_mock:
role_creds = creds.get_creds_by_roles(
- roles=['role1', 'role2'], scope=scope)
+ roles=['role1', 'role2'], force_new=force_new, scope=scope)
calls = user_mock.mock_calls
# Assert that the role creation is called with the 2 specified roles
self.assertEqual(len(calls), 2)
@@ -338,13 +541,18 @@
with mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project') as user_mock1:
role_creds_new = creds.get_creds_by_roles(
- roles=['role1', 'role2'], scope=scope)
+ roles=['role1', 'role2'], force_new=force_new, scope=scope)
calls = user_mock1.mock_calls
+ # With force_new, assert that new creds are created
+ if force_new:
+ self.assertEqual(len(calls), 2)
+ self.assertNotEqual(role_creds, role_creds_new)
# Assert that previously created creds are return and no call to
- # role creation.
- self.assertEqual(len(calls), 0)
+ # role creation
# Check if previously created creds are returned.
- self.assertEqual(role_creds, role_creds_new)
+ else:
+ self.assertEqual(len(calls), 0)
+ self.assertEqual(role_creds, role_creds_new)
def test_get_same_role_creds_with_project_scope(self):
self._test_get_same_role_creds_with_project_scope(scope='project')
@@ -352,6 +560,13 @@
def test_get_same_role_creds_with_default_scope(self):
self._test_get_same_role_creds_with_project_scope()
+ def test_get_same_role_creds_with_project_scope_force_new(self):
+ self._test_get_same_role_creds_with_project_scope(
+ scope='project', force_new=True)
+
+ def test_get_same_role_creds_with_default_scope_force_new(self):
+ self._test_get_same_role_creds_with_project_scope(force_new=True)
+
@mock.patch('tempest.lib.common.rest_client.RestClient')
def _test_get_different_role_creds_with_project_scope(
self, MockRestClient, scope=None):
@@ -391,8 +606,12 @@
self._mock_assign_user_role()
self._mock_list_role()
self._mock_tenant_create('1234', 'fake_prim_tenant')
- self._mock_user_create('1234', 'fake_prim_user')
+ show_mock = self.patchobject(creds.creds_client, 'show_project')
+ show_mock.return_value = {'id': '1234', 'name': 'fake_prim_tenant'}
+ self._mock_user_create('1234', 'fake_project1_user')
creds.get_primary_creds()
+ self._mock_user_create('12341', 'fake_project1_user')
+ creds.get_project_admin_creds()
self._mock_tenant_create('12345', 'fake_alt_tenant')
self._mock_user_create('12345', 'fake_alt_user')
creds.get_alt_creds()
@@ -407,10 +626,11 @@
creds.clear_creds()
# Verify user delete calls
calls = user_mock.mock_calls
- self.assertEqual(len(calls), 3)
+ self.assertEqual(len(calls), 4)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
+ self.assertIn('12341', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify tenant delete calls
@@ -512,6 +732,9 @@
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
+ show_mock = self.patchobject(creds.creds_client, 'show_project')
+ show_mock.return_value = {'id': '1234', 'name': 'fake_prim_tenant'}
+ self._mock_user_create('12341', 'fake_project1_user')
self._mock_network_create(creds, '1234', 'fake_net')
self._mock_subnet_create(creds, '1234', 'fake_subnet')
self._mock_router_create('1234', 'fake_router')
@@ -519,6 +742,7 @@
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
creds.get_primary_creds()
+ creds.get_project_admin_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
router_interface_mock.reset_mock()
# Create alternate tenant and network
@@ -779,6 +1003,7 @@
fake_response = fake_identity._fake_v3_response
tenants_client_class = tenants_client.ProjectsClient
delete_tenant = 'delete_project'
+ create_tenant = 'create_project'
def setUp(self):
super(TestDynamicCredentialProviderV3, self).setUp()
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 63f1783..4f21956 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -74,7 +74,11 @@
parent: devstack-tempest
# This job version is with swift enabled on py3
# as swift is ready on py3 from stable/ussuri onwards.
- branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+ # As this uses the 'integrated-full' tox env, which is not
+ # available in the old Tempest used until stable/wallaby,
+ # this job definition is only for stable/xena onwards;
+ # a separate job definition covers up to stable/wallaby.
+ branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria|wallaby)).*$
description: |
Base integration test with Neutron networking, horizon, swift enable,
and py3.
@@ -409,20 +413,20 @@
- tempest-integrated-networking
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
gate:
jobs:
- grenade
- tempest-integrated-networking
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
- project-template:
name: integrated-gate-compute
@@ -446,15 +450,15 @@
branches: ^stable/(wallaby|xena|yoga).*$
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
gate:
jobs:
- tempest-integrated-compute
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
periodic-weekly:
jobs:
# centos-9-stream is tested from zed release onwards
@@ -476,20 +480,20 @@
- tempest-integrated-placement
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/ussuri).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
gate:
jobs:
- grenade
- tempest-integrated-placement
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
- project-template:
name: integrated-gate-storage
@@ -506,20 +510,20 @@
- tempest-integrated-storage
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
gate:
jobs:
- grenade
- tempest-integrated-storage
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
- project-template:
name: integrated-gate-object-storage
@@ -534,17 +538,17 @@
- tempest-integrated-object-storage
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
gate:
jobs:
- grenade
- tempest-integrated-object-storage
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
- # and job is broken on wallaby branch due to the issue
+ # and the job is broken on branches up to wallaby due to the issue
# described in https://review.opendev.org/872341
- openstacksdk-functional-devstack:
- branches: ^(?!stable/(ussuri|wallaby)).*$
+ branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index d20186e..3df61d8 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -156,6 +156,11 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-all:
irrelevant-files: *tempest-irrelevant-files
+ - tempest-slow-parallel
+ - tempest-full-parallel
+ - tempest-full-zed-extra-tests
+ - tempest-full-yoga-extra-tests
+ - tempest-full-xena-extra-tests
- neutron-ovs-tempest-dvr-ha-multinode-full:
irrelevant-files: *tempest-irrelevant-files
- nova-tempest-v2-api:
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index f70e79c..8aeb748 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -56,6 +56,36 @@
- job:
name: tempest-full-py3
parent: devstack-tempest
+ # This job version uses the 'full' tox env, which is
+ # also available for stable/ussuri through stable/wallaby.
+ branches:
+ - stable/ussuri
+ - stable/victoria
+ - stable/wallaby
+ description: |
+ Base integration test with Neutron networking, horizon, swift
+ enabled, and py3.
+ Former names for this job were:
+ * legacy-tempest-dsvm-py35
+ * gate-tempest-dsvm-py35
+ required-projects:
+ - openstack/horizon
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ GLANCE_USE_IMPORT_WORKFLOW: True
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ # Enable horizon so that we can run horizon tests.
+ horizon: true
+
+- job:
+ name: tempest-full-py3
+ parent: devstack-tempest
# This job version is with swift disabled on py3
# as swift was not ready on py3 until stable/train.
branches: