Merge "Adds tests for resize server for swap"
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 0adfebd..3ffa68a 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,6 +9,7 @@
Tempest master supports the below OpenStack Releases:
+* 2025.1
* 2024.2
* 2024.1
* 2023.2
diff --git a/releasenotes/notes/change-default-disk-format-0d5230cbb19e3d44.yaml b/releasenotes/notes/change-default-disk-format-0d5230cbb19e3d44.yaml
new file mode 100644
index 0000000..57c5319
--- /dev/null
+++ b/releasenotes/notes/change-default-disk-format-0d5230cbb19e3d44.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ The default value of ``[volume] disk_format``, which specifies the
+ disk format of the image in a "copy volume to image" operation,
+ has changed from ``raw`` to ``[raw, qcow2]``. To support this, the
+ type of the config option also changed from ``string`` to
+ ``list``, i.e. it now accepts multiple values.
diff --git a/releasenotes/notes/tempest-2025-1-release-e706f62c7e841bd0.yaml b/releasenotes/notes/tempest-2025-1-release-e706f62c7e841bd0.yaml
new file mode 100644
index 0000000..86af60c
--- /dev/null
+++ b/releasenotes/notes/tempest-2025-1-release-e706f62c7e841bd0.yaml
@@ -0,0 +1,17 @@
+---
+prelude: >
+ This release is to tag Tempest for OpenStack 2025.1 release.
+ This release marks the start of 2025.1 release support in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * 2025.1
+ * 2024.2
+ * 2024.1
+ * 2023.2
+
+ Current development of Tempest is for OpenStack 2025.2 development
+ cycle. Every Tempest commit is also tested against master during
+ the 2025.2 cycle. However, this does not necessarily mean that using
+ Tempest as of this tag will work against a 2025.2 (or future release)
+ cloud.
+ To be on the safe side, use this tag to test the OpenStack 2025.1 release.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 633d90e..058f65f 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,8 @@
:maxdepth: 1
unreleased
+ v43.0.0
+ v42.0.0
v41.0.0
v40.0.0
v39.0.0
diff --git a/releasenotes/source/v42.0.0.rst b/releasenotes/source/v42.0.0.rst
new file mode 100644
index 0000000..ffc375d
--- /dev/null
+++ b/releasenotes/source/v42.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v42.0.0 Release Notes
+=====================
+
+.. release-notes:: 42.0.0 Release Notes
+ :version: 42.0.0
diff --git a/releasenotes/source/v43.0.0.rst b/releasenotes/source/v43.0.0.rst
new file mode 100644
index 0000000..073cd5c
--- /dev/null
+++ b/releasenotes/source/v43.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v43.0.0 Release Notes
+=====================
+
+.. release-notes:: 43.0.0 Release Notes
+ :version: 43.0.0
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index c284370..575d7fc 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -157,12 +157,14 @@
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate = self._create_test_aggregate()
- self.client.add_host(aggregate['id'], host=self.hosts[0])
+ hosts = [host for host in self.hosts if (
+ host not in CONF.compute.target_hosts_to_avoid)]
+ self.client.add_host(aggregate['id'], host=hosts[0])
self.addCleanup(self.client.remove_host, aggregate['id'],
- host=self.hosts[0])
+ host=hosts[0])
self.assertRaises(lib_exc.Conflict, self.client.add_host,
- aggregate['id'], host=self.hosts[0])
+ aggregate['id'], host=hosts[0])
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7a53af20-137a-4e44-a4ae-e19260e626d9')
@@ -171,13 +173,15 @@
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate = self._create_test_aggregate()
- self.client.add_host(aggregate['id'], host=self.hosts[0])
+ hosts = [host for host in self.hosts if (
+ host not in CONF.compute.target_hosts_to_avoid)]
+ self.client.add_host(aggregate['id'], host=hosts[0])
self.addCleanup(self.client.remove_host, aggregate['id'],
- host=self.hosts[0])
+ host=hosts[0])
self.assertRaises(lib_exc.Forbidden,
self.aggregates_client.remove_host,
- aggregate['id'], host=self.hosts[0])
+ aggregate['id'], host=hosts[0])
@decorators.attr(type=['negative'])
@decorators.idempotent_id('95d6a6fa-8da9-4426-84d0-eec0329f2e4d')
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 8cf44be..8b2bc69 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -109,29 +109,37 @@
# it is shared with the other tests. After it is uploaded in Glance,
# there is no way to delete it from Cinder, so we delete it from Glance
# using the Glance images_client and from Cinder via tearDownClass.
- image_name = data_utils.rand_name(self.__class__.__name__ + '-Image',
- prefix=CONF.resource_name_prefix)
- body = self.volumes_client.upload_volume(
- self.volume['id'], image_name=image_name,
- disk_format=CONF.volume.disk_format)['os-volume_upload_image']
- image_id = body["image_id"]
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.images_client.delete_image,
- image_id)
- waiters.wait_for_image_status(self.images_client, image_id, 'active')
- # This is required for the optimized upload volume path.
- # New location APIs are async so we need to wait for the location
- # import task to complete.
- # This should work with old location API since we don't fail if there
- # are no tasks for the image
- waiters.wait_for_image_tasks_status(self.images_client,
- image_id, 'success')
- waiters.wait_for_volume_resource_status(self.volumes_client,
- self.volume['id'], 'available')
+ # NOTE: It may look strange to loop through the disk formats,
+ # but a similar implementation is used in the test_volume_bootable
+ # test. Also, ddt is not used anywhere in tempest, so this looks
+ # like the only way.
+ for disk_format in CONF.volume.disk_format:
+ image_name = data_utils.rand_name(
+ self.__class__.__name__ + '-Image',
+ prefix=CONF.resource_name_prefix)
+ body = self.volumes_client.upload_volume(
+ self.volume['id'], image_name=image_name,
+ disk_format=disk_format)['os-volume_upload_image']
+ image_id = body["image_id"]
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.images_client.delete_image,
+ image_id)
+ waiters.wait_for_image_status(self.images_client, image_id,
+ 'active')
+ # This is required for the optimized upload volume path.
+ # New location APIs are async so we need to wait for the location
+ # import task to complete.
+ # This should work with old location API since we don't fail if
+ # there are no tasks for the image
+ waiters.wait_for_image_tasks_status(self.images_client,
+ image_id, 'success')
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ self.volume['id'],
+ 'available')
- image_info = self.images_client.show_image(image_id)
- self.assertEqual(image_name, image_info['name'])
- self.assertEqual(CONF.volume.disk_format, image_info['disk_format'])
+ image_info = self.images_client.show_image(image_id)
+ self.assertEqual(image_name, image_info['name'])
+ self.assertEqual(disk_format, image_info['disk_format'])
@decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
def test_reserve_unreserve_volume(self):
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 9b79f38..640441a 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -18,7 +18,6 @@
from tempest.api.volume import base
from tempest.common import utils
-from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -38,10 +37,7 @@
# Create a volume
kwargs['name'] = v_name
kwargs['metadata'] = metadata
- volume = self.volumes_client.create_volume(**kwargs)['volume']
- self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'available')
+ volume = self.create_volume(wait_until='available', **kwargs)
self.assertEqual(volume['name'], v_name,
"The created volume name is not equal "
"to the requested name")
@@ -103,11 +99,7 @@
params = {'description': new_v_desc,
'availability_zone': volume['availability_zone'],
'size': CONF.volume.volume_size}
- new_volume = self.volumes_client.create_volume(**params)['volume']
- self.addCleanup(self.delete_volume, self.volumes_client,
- new_volume['id'])
- waiters.wait_for_volume_resource_status(self.volumes_client,
- new_volume['id'], 'available')
+ new_volume = self.create_volume(wait_until='available', **params)
params = {'name': volume['name'],
'description': volume['description']}
diff --git a/tempest/config.py b/tempest/config.py
index f9a08ea..9c288ff 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -582,10 +582,13 @@
default=True,
help='Enable special configuration drive with metadata.'),
cfg.ListOpt('scheduler_enabled_filters',
- default=["AvailabilityZoneFilter", "ComputeFilter",
- "ComputeCapabilitiesFilter", "ImagePropertiesFilter",
- "ServerGroupAntiAffinityFilter",
- "ServerGroupAffinityFilter"],
+ default=[
+ "ComputeFilter",
+ "ComputeCapabilitiesFilter",
+ "ImagePropertiesFilter",
+ "ServerGroupAntiAffinityFilter",
+ "ServerGroupAffinityFilter",
+ ],
help="A list of enabled filters that Nova will accept as "
"hints to the scheduler when creating a server. If the "
"default value is overridden in nova.conf by the test "
@@ -1022,9 +1025,9 @@
cfg.StrOpt('vendor_name',
default='Open Source',
help='Backend vendor to target when creating volume types'),
- cfg.StrOpt('disk_format',
- default='raw',
- help='Disk format to use when copying a volume to image'),
+ cfg.ListOpt('disk_format',
+ default=['raw', 'qcow2'],
+ help='Disk format to use when copying a volume to image'),
cfg.IntOpt('volume_size',
default=1,
help='Default size in GB for volumes created by volumes tests'),
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 47b7812..49f9ebc 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -455,10 +455,13 @@
- grenade-skip-level:
branches:
- ^.*/2024.1
- # on current master 2025.1(SLURP) grenade-skip-level-always is voting
+ # on 2025.1(SLURP) grenade-skip-level-always is voting.
# which test stable/2024.1 to 2025.1 upgrade.
+ # As extra testing, we also run it voting on the current master (even though that is non-SLURP),
+ # but if a project feels it is not required for non-SLURP releases, it can make the job non-voting or remove it.
- grenade-skip-level-always:
branches:
+ - ^.*/2025.1
- master
- tempest-integrated-networking
# Do not run it on ussuri until below issue is fixed
@@ -479,10 +482,13 @@
- grenade-skip-level:
branches:
- ^.*/2024.1
- # on current master 2025.1(SLURP) grenade-skip-level-always is voting
+ # on 2025.1(SLURP) grenade-skip-level-always is voting.
# which test stable/2024.1 to 2025.1 upgrade.
+ # As extra testing, we also run it voting on the current master (even though that is non-SLURP),
+ # but if a project feels it is not required for non-SLURP releases, it can make the job non-voting or remove it.
- grenade-skip-level-always:
branches:
+ - ^.*/2025.1
- master
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
@@ -525,6 +531,7 @@
- ^.*/2023.2
- ^.*/2024.1
- ^.*/2024.2
+ - ^.*/2025.1
- master
- tempest-integrated-compute
# Do not run it on ussuri until below issue is fixed
@@ -542,6 +549,7 @@
- ^.*/2023.2
- ^.*/2024.1
- ^.*/2024.2
+ - ^.*/2025.1
- master
- tempest-integrated-compute
- openstacksdk-functional-devstack:
@@ -579,10 +587,13 @@
- grenade-skip-level:
branches:
- ^.*/2024.1
- # on current master 2025.1(SLURP) grenade-skip-level-always is voting
+ # on 2025.1(SLURP) grenade-skip-level-always is voting.
# which test stable/2024.1 to 2025.1 upgrade.
+ # As extra testing, we also run it voting on the current master (even though that is non-SLURP),
+ # but if a project feels it is not required for non-SLURP releases, it can make the job non-voting or remove it.
- grenade-skip-level-always:
branches:
+ - ^.*/2025.1
- master
- tempest-integrated-placement
# Do not run it on ussuri until below issue is fixed
@@ -603,10 +614,13 @@
- grenade-skip-level:
branches:
- ^.*/2024.1
- # on current master 2025.1(SLURP) grenade-skip-level-always is voting
+ # on 2025.1(SLURP) grenade-skip-level-always is voting.
# which test stable/2024.1 to 2025.1 upgrade.
+ # As extra testing, we also run it voting on the current master (even though that is non-SLURP),
+ # but if a project feels it is not required for non-SLURP releases, it can make the job non-voting or remove it.
- grenade-skip-level-always:
branches:
+ - ^.*/2025.1
- master
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
@@ -640,10 +654,13 @@
- grenade-skip-level:
branches:
- ^.*/2024.1
- # on current master 2025.1(SLURP) grenade-skip-level-always is voting
+ # on 2025.1(SLURP) grenade-skip-level-always is voting.
# which test stable/2024.1 to 2025.1 upgrade.
+ # As extra testing, we also run it voting on the current master (even though that is non-SLURP),
+ # but if a project feels it is not required for non-SLURP releases, it can make the job non-voting or remove it.
- grenade-skip-level-always:
branches:
+ - ^.*/2025.1
- master
- tempest-integrated-storage
# Do not run it on ussuri until below issue is fixed
@@ -663,10 +680,13 @@
- grenade-skip-level:
branches:
- ^.*/2024.1
- # on current master 2025.1(SLURP) grenade-skip-level-always is voting
+ # on 2025.1(SLURP) grenade-skip-level-always is voting.
# which test stable/2024.1 to 2025.1 upgrade.
+ # As extra testing, we also run it voting on the current master (even though that is non-SLURP),
+ # but if a project feels it is not required for non-SLURP releases, it can make the job non-voting or remove it.
- grenade-skip-level-always:
branches:
+ - ^.*/2025.1
- master
- tempest-integrated-storage
# Do not run it on ussuri until below issue is fixed
@@ -694,10 +714,13 @@
- grenade-skip-level:
branches:
- ^.*/2024.1
- # on current master 2025.1(SLURP) grenade-skip-level-always is voting
+ # on 2025.1(SLURP) grenade-skip-level-always is voting.
# which test stable/2024.1 to 2025.1 upgrade.
+ # As extra testing, we also run it voting on the current master (even though that is non-SLURP),
+ # but if a project feels it is not required for non-SLURP releases, it can make the job non-voting or remove it.
- grenade-skip-level-always:
branches:
+ - ^.*/2025.1
- master
- tempest-integrated-object-storage
# Do not run it on ussuri until below issue is fixed
@@ -717,10 +740,13 @@
- grenade-skip-level:
branches:
- ^.*/2024.1
- # on current master 2025.1(SLURP) grenade-skip-level-always is voting
+ # on 2025.1(SLURP) grenade-skip-level-always is voting.
# which test stable/2024.1 to 2025.1 upgrade.
+ # As extra testing, we also run it voting on the current master (even though that is non-SLURP),
+ # but if a project feels it is not required for non-SLURP releases, it can make the job non-voting or remove it.
- grenade-skip-level-always:
branches:
+ - ^.*/2025.1
- master
- tempest-integrated-object-storage
# Do not run it on ussuri until below issue is fixed
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 2f21c2d..f044e79 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -45,7 +45,7 @@
# if things are working in latest and oldest it will work in between
# stable branches also. If anything is breaking we will be catching
# those in respective stable branch gate.
- - tempest-full-2024-2:
+ - tempest-full-2025-1:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-2023-2:
irrelevant-files: *tempest-irrelevant-files
@@ -110,8 +110,7 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-full-enforce-scope-new-defaults:
irrelevant-files: *tempest-irrelevant-files
- - devstack-plugin-ceph-tempest-py3:
- timeout: 9000
+ - nova-ceph-multistore:
irrelevant-files: *tempest-irrelevant-files
- neutron-ovs-grenade-multinode:
irrelevant-files: *tempest-irrelevant-files
@@ -156,8 +155,8 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-full-enforce-scope-new-defaults:
irrelevant-files: *tempest-irrelevant-files
- #- devstack-plugin-ceph-tempest-py3:
- # irrelevant-files: *tempest-irrelevant-files
+ - nova-ceph-multistore:
+ irrelevant-files: *tempest-irrelevant-files
- nova-live-migration:
irrelevant-files: *tempest-irrelevant-files
- ironic-tempest-bios-ipmi-direct-tinyipa:
@@ -173,8 +172,6 @@
experimental:
jobs:
- nova-multi-cell
- - nova-ceph-multistore:
- irrelevant-files: *tempest-irrelevant-files
- tempest-with-latest-microversion
- tempest-stestr-master
- tempest-cinder-v2-api:
@@ -201,12 +198,15 @@
irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
+ - tempest-full-2025-1
- tempest-full-2024-2
- tempest-full-2024-1
- tempest-full-2023-2
+ - tempest-slow-2025-1
- tempest-slow-2024-2
- tempest-slow-2024-1
- tempest-slow-2023-2
+ - tempest-full-2025-1-extra-tests
- tempest-full-2024-2-extra-tests
- tempest-full-2024-1-extra-tests
- tempest-full-2023-2-extra-tests
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 5785ec6..6409ae3 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,11 @@
# NOTE(gmann): This file includes all stable release jobs definition.
- job:
+ name: tempest-full-2025-1
+ parent: tempest-full-py3
+ nodeset: openstack-single-node-noble
+ override-checkout: stable/2025.1
+
+- job:
name: tempest-full-2024-2
parent: tempest-full-py3
nodeset: openstack-single-node-jammy
@@ -18,6 +24,12 @@
override-checkout: stable/2023.2
- job:
+ name: tempest-full-2025-1-extra-tests
+ parent: tempest-extra-tests
+ nodeset: openstack-single-node-noble
+ override-checkout: stable/2025.1
+
+- job:
name: tempest-full-2024-2-extra-tests
parent: tempest-extra-tests
nodeset: openstack-single-node-jammy
@@ -36,6 +48,12 @@
override-checkout: stable/2023.2
- job:
+ name: tempest-slow-2025-1
+ parent: tempest-slow-py3
+ nodeset: openstack-two-node-noble
+ override-checkout: stable/2025.1
+
+- job:
name: tempest-slow-2024-2
parent: tempest-slow-py3
nodeset: openstack-two-node-jammy