Merge "Add qcow2 format to upload image test"
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 33e75ff..e58753b 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -458,6 +458,14 @@
 
   .. _2.96: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-2024-1-caracal-and-2024-2-dalmatian
 
+  * `2.98`_
+
+  .. _2.98: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#microversion-2-98
+
+  * `2.100`_
+
+  .. _2.100: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#microversion-2-100
+
 * Volume
 
   * `3.3`_
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 0adfebd..3ffa68a 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,6 +9,7 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* 2025.1
 * 2024.2
 * 2024.1
 * 2023.2
diff --git a/etc/accounts.yaml.sample b/etc/accounts.yaml.sample
index 3dbed79..702cc6f 100644
--- a/etc/accounts.yaml.sample
+++ b/etc/accounts.yaml.sample
@@ -22,6 +22,12 @@
 #
 # The value of domain_[id|name] is used for project_domain_[id|name] if not
 # specified and user_domain_[id|name] if not specified.
+#
+# When specifying domain-scoped accounts, domain_[id|name] must be present
+# and project_[id|name] must be absent.
+#
+# When specifying system-scoped accounts, the value of system can be anything
+# (for now), and neither project_[id|name] nor domain_[id|name] may be present.
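+#
+# See the domain-scoped and system-scoped admin examples further below in
+# this file.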
 
 - username: 'user_1'
   tenant_name: 'test_tenant_1'
@@ -40,6 +46,22 @@
     - 'not_an_admin'
     - 'an_admin'
 
+# To specify a domain-scoped admin
+- username: 'user_3'
+  user_domain_name: 'test_domain'
+  password: 'test_password'
+  domain_name: 'test_domain'
+  roles:
+  - 'admin'
+
+# To specify a system-scoped admin
+- username: 'user_4'
+  user_domain_name: 'test_domain'
+  password: 'test_password'
+  system: 'all'
+  roles:
+  - 'admin'
+
 # To specify a user has a role specified in the config file you can use the
 # type field to specify it, valid values are admin, operator, and reseller_admin
 - username: 'swift_pseudo_admin_user_1'
diff --git a/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml b/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
index 58b161f..313b276 100644
--- a/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
+++ b/releasenotes/notes/deprecate-spice-rdp-console-config-f2af173552axfb72.yaml
@@ -1,6 +1,10 @@
 ---
 deprecations:
   - |
-    The config options ``CONF.compute.spice_console`` and ``CONF.compute.rdp_console``
-    are deprecated because test cases using them are removed.
-    We can add them back when adding the test cases again.
+    The config option ``CONF.compute.rdp_console`` is deprecated because the
+    test cases using it have been removed. It can be added back when the
+    test cases are added again.
+  - |
+    The config option ``CONF.compute.spice_console`` was previously listed as
+    deprecated, but it is now back in active use to support the testing of
+    SPICE consoles in Nova.
diff --git a/releasenotes/notes/tempest-2024-2-release-e706f62c7e841bd0.yaml b/releasenotes/notes/tempest-2024-2-release-e706f62c7e841bd0.yaml
new file mode 100644
index 0000000..86af60c
--- /dev/null
+++ b/releasenotes/notes/tempest-2024-2-release-e706f62c7e841bd0.yaml
@@ -0,0 +1,17 @@
+---
+prelude: >
+    This release is to tag Tempest for the OpenStack 2025.1 release.
+    This release marks the start of 2025.1 release support in Tempest.
+    After this release, Tempest will support the below OpenStack releases:
+
+    * 2025.1
+    * 2024.2
+    * 2024.1
+    * 2023.2
+
+    Current development of Tempest is for the OpenStack 2025.2 development
+    cycle. Every Tempest commit is also tested against master during
+    the 2025.2 cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a 2025.2 (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack 2025.1 release.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 633d90e..058f65f 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,8 @@
    :maxdepth: 1
 
    unreleased
+   v43.0.0
+   v42.0.0
    v41.0.0
    v40.0.0
    v39.0.0
diff --git a/releasenotes/source/v42.0.0.rst b/releasenotes/source/v42.0.0.rst
new file mode 100644
index 0000000..ffc375d
--- /dev/null
+++ b/releasenotes/source/v42.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v42.0.0 Release Notes
+=====================
+
+.. release-notes:: 42.0.0 Release Notes
+   :version: 42.0.0
diff --git a/releasenotes/source/v43.0.0.rst b/releasenotes/source/v43.0.0.rst
new file mode 100644
index 0000000..073cd5c
--- /dev/null
+++ b/releasenotes/source/v43.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v43.0.0 Release Notes
+=====================
+
+.. release-notes:: 43.0.0 Release Notes
+   :version: 43.0.0
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index f52d4c0..c933c80 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -65,6 +65,18 @@
                           self.s1_id,
                           flavor_ref['id'])
 
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('7fcadfab-bd6a-4753-8db7-4a51e51aade9')
+    def test_restore_server_invalid_state(self):
+        """Restore-deleting a server not in 'soft-delete' state should fail
+
+        We can restore a soft deleted server, but can't restore a server that
+        is not in 'soft-delete' state.
+        """
+        self.assertRaises(lib_exc.Conflict,
+                          self.client.restore_soft_deleted_server,
+                          self.s1_id)
+
     @decorators.idempotent_id('7368a427-2f26-4ad9-9ba9-911a0ec2b0db')
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index c5d5b19..e0290e4 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -110,6 +110,8 @@
 
         Creates two servers in an anti-affinity server group and
         asserts the servers are in the group and on different hosts.
+
+        Uses the /servers multi-create API.
         """
         hosts = self._create_servers_with_group('anti-affinity')
         hostnames = list(hosts.values())
@@ -126,6 +128,8 @@
 
         Creates two servers in an affinity server group and
         asserts the servers are in the group and on same host.
+
+        Uses the /servers multi-create API.
         """
         hosts = self._create_servers_with_group('affinity')
         hostnames = list(hosts.values())
diff --git a/tempest/api/compute/admin/test_spice.py b/tempest/api/compute/admin/test_spice.py
new file mode 100644
index 0000000..f09012d
--- /dev/null
+++ b/tempest/api/compute/admin/test_spice.py
@@ -0,0 +1,153 @@
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import socket
+import struct
+import urllib.parse as urlparse
+
+from tempest.api.compute import base
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class SpiceDirectConsoleTestJSON(base.BaseV2ComputeAdminTest):
+    """Test the spice-direct console"""
+
+    create_default_network = True
+
+    min_microversion = '2.99'
+    max_microversion = 'latest'
+
+    # SPICE client protocol constants
+    magic = b'REDQ'
+    major = 2
+    minor = 2
+    main_channel = 1
+    common_caps = 11  # AuthSelection, AuthSpice, MiniHeader
+    channel_caps = 9  # SemiSeamlessMigrate, SeamlessMigrate
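+    # NOTE: the capability values above are bitmasks: 11 == 0b1011 sets bits
+    # 0, 1 and 3, which correspond to the AuthSelection, AuthSpice and
+    # MiniHeader common capabilities in the spice-protocol enums, while
+    # 9 == 0b1001 sets bits 0 and 3, SemiSeamlessMigrate and SeamlessMigrate.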
+
+    @classmethod
+    def skip_checks(cls):
+        super().skip_checks()
+        if not CONF.compute_feature_enabled.spice_console:
+            raise cls.skipException('SPICE console feature is disabled.')
+
+    def tearDown(self):
+        super().tearDown()
+        # NOTE(zhufl): server_check_teardown can raise an exception, which
+        # would prevent other cleanup steps from being executed, so
+        # server_check_teardown should be called after super's tearDown.
+        self.server_check_teardown()
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.servers_client
+
+    @classmethod
+    def resource_setup(cls):
+        super().resource_setup()
+        cls.server = cls.create_test_server(wait_until="ACTIVE")
+
+    @decorators.idempotent_id('80f4460d-1a06-403c-9e93-cf434c70be05')
+    def test_spice_direct(self):
+        """Test accessing spice-direct console of server"""
+
+        # Request a spice-direct console and validate the result. Any user can
+        # do this.
+        body = self.servers_client.get_remote_console(
+            self.server['id'], console_type='spice-direct', protocol='spice')
+
+        console_url = body['remote_console']['url']
+        parts = urlparse.urlparse(console_url)
+        qparams = urlparse.parse_qs(parts.query)
+        self.assertIn('token', qparams)
+        self.assertNotEmpty(qparams['token'])
+        self.assertEqual(1, len(qparams['token']))
+
+        self.assertEqual('spice', body['remote_console']['protocol'])
+        self.assertEqual('spice-direct', body['remote_console']['type'])
+
+        # For reasons best known to the Python developers, the qparams values
+        # are lists, as documented at
+        # https://docs.python.org/3/library/urllib.parse.html
+        token = qparams['token'][0]
+
+        # Turn that console token into hypervisor connection details. Only
+        # admins can do this because it's expected that the request is coming
+        # from a proxy and we don't want to expose intimate hypervisor details
+        # to all users.
+        body = self.admin_servers_client.get_console_auth_token_details(
+            token)
+
+        console = body['console']
+        self.assertEqual(self.server['id'], console['instance_uuid'])
+        self.assertIn('port', console)
+        self.assertIn('tls_port', console)
+        self.assertIsNone(console['internal_access_path'])
+
+        # Connect to the specified non-TLS port and verify we get back
+        # a SPICE protocol greeting
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.connect((console['host'], console['port']))
+
+        # Send a client greeting
+        #
+        # ---- SpiceLinkMess ----
+        # 4s    UINT32 magic value, must be REDQ
+        # I     UINT32 major_version, must be 2
+        # I     UINT32 minor_version, must be 2
+        # I     UINT32 size number of bytes following this field to the end
+        #              of this message.
+        # I     UINT32 connection_id. In case of a new session (i.e., channel
+        #              type is SPICE_CHANNEL_MAIN) this field is set to zero,
+        #              and in response the server will allocate session id
+        #              and will send it via the SpiceLinkReply message. In
+        #              case of all other channel types, this field will be
+        #              equal to the allocated session id.
+        # B     UINT8  channel_type, we use main
+        # B     UINT8  channel_id to connect to
+        # I     UINT32 num_common_caps number of common client channel
+        #              capabilities words
+        # I     UINT32 num_channel_caps number of specific client channel
+        #              capabilities words
+        # I     UINT32 caps_offset location of the start of the capabilities
+        #              vector given by the bytes offset from the “size”
+        #              member (i.e., from the address of the “connection_id”
+        #              member).
+        # ...          capabilities
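+        # In the pack call below, 42 - 16 == 26 is the number of bytes after
+        # the size field (connection_id 4 + channel_type 1 + channel_id 1 +
+        # num_common_caps 4 + num_channel_caps 4 + caps_offset 4 + two
+        # capability words of 4 bytes each), and 18 is the capabilities
+        # offset counted from connection_id (4 + 1 + 1 + 4 + 4 + 4).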
+        sock.sendall(struct.pack(
+            '<4sIIIIBBIIIII', self.magic, self.major, self.minor, 42 - 16,
+            0, self.main_channel, 0, 1, 1, 18, self.common_caps,
+            self.channel_caps))
+
+        # ---- SpiceLinkReply ----
+        # 4s     UINT32 magic value, must be equal to SPICE_MAGIC
+        # I      UINT32 major_version, must be equal to SPICE_VERSION_MAJOR
+        # I      UINT32 minor_version, must be equal to SPICE_VERSION_MINOR
+        # I      UINT32 size number of bytes following this field to the end
+        #               of this message.
+        # I      UINT32 error code
+        # ...
+        buffered = sock.recv(20)
+        self.assertIsNotNone(buffered)
+        self.assertEqual(20, len(buffered))
+
+        magic, major, minor, _, error = struct.unpack_from('<4sIIII', buffered)
+        self.assertEqual(b'REDQ', magic)
+        self.assertEqual(2, major)
+        self.assertEqual(2, minor)
+        self.assertEqual(0, error)
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index e7e84d6..ea3a710 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -276,9 +276,63 @@
     max_microversion = 'latest'
 
     @decorators.idempotent_id('4eee1ffe-9e00-4c99-a431-0d3e0f323a8f')
-    def test_list_show_server_296(self):
-        server = self.create_test_server()
+    def test_list_show_update_rebuild_server_296(self):
+        server = self.create_test_server(wait_until='ACTIVE')
         # Checking list API response schema.
         self.servers_client.list_servers(detail=True)
         # Checking show API response schema
         self.servers_client.show_server(server['id'])
+        # Checking update API response schema
+        self.servers_client.update_server(server['id'])
+        # Check rebuild API response schema
+        self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
+
+
+class ServersListShow298Test(base.BaseV2ComputeTest):
+    """Test compute server with microversion >= 2.98"""
+
+    min_microversion = '2.98'
+    max_microversion = 'latest'
+
+    @decorators.idempotent_id('3981e496-3bf7-4015-b807-63ffee7c520c')
+    def test_list_show_update_rebuild_server_298(self):
+        server = self.create_test_server(wait_until='ACTIVE')
+        # Check list details API response schema
+        self.servers_client.list_servers(detail=True)
+        # Check show API response schema
+        self.servers_client.show_server(server['id'])
+        # Checking update API response schema
+        self.servers_client.update_server(server['id'])
+        # Check rebuild API response schema
+        self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
+
+
+class ServersListShow2100Test(base.BaseV2ComputeTest):
+    """Test compute server with microversion >= than 2.100
+
+    This test tests the Server APIs response schema for 2.100 microversion.
+    No specific assert or behaviour verification is needed.
+    """
+
+    min_microversion = '2.100'
+    max_microversion = 'latest'
+
+    @decorators.idempotent_id('2c3a8270-e6f7-4400-af0f-db003c117e48')
+    def test_list_show_rebuild_update_server_2100(self):
+        server = self.create_test_server(wait_until='ACTIVE')
+        # Checking list API response schema.
+        self.servers_client.list_servers(detail=True)
+        # Checking show API response schema
+        self.servers_client.show_server(server['id'])
+        # Checking update API response schema
+        self.servers_client.update_server(server['id'])
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
+        # Check rebuild API response schema
+        self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 22fe54d..fa40629 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -472,18 +472,6 @@
                           self.client.restore_soft_deleted_server,
                           nonexistent_server)
 
-    @decorators.attr(type=['negative'])
-    @decorators.idempotent_id('7fcadfab-bd6a-4753-8db7-4a51e51aade9')
-    def test_restore_server_invalid_state(self):
-        """Restore-deleting a server not in 'soft-delete' state should fail
-
-        We can restore a soft deleted server, but can't restore a server that
-        is not in 'soft-delete' state.
-        """
-        self.assertRaises(lib_exc.Conflict,
-                          self.client.restore_soft_deleted_server,
-                          self.server_id)
-
     @decorators.idempotent_id('abca56e2-a892-48ea-b5e5-e07e69774816')
     @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
                           'Shelve is not available.')
diff --git a/tempest/api/image/v2/test_images_formats.py b/tempest/api/image/v2/test_images_formats.py
index f0dec90..520a215 100644
--- a/tempest/api/image/v2/test_images_formats.py
+++ b/tempest/api/image/v2/test_images_formats.py
@@ -90,14 +90,18 @@
     @decorators.idempotent_id('a245fcbe-63ce-4dc1-a1d0-c16d76d9e6df')
     def test_accept_usable_formats(self):
         if self.imgdef['usable']:
-            if self.imgdef['format'] in CONF.image.disk_formats:
-                # These are expected to work
+            try:
                 self._test_image(self.imgdef)
-            else:
-                # If this is not configured to be supported, we should get
-                # a BadRequest from glance
-                self.assertRaises(lib_exc.BadRequest,
-                                  self._test_image, self.imgdef)
+            except lib_exc.BadRequest:
+                format = self.imgdef['format']
+                if format == 'gpt' and format not in CONF.image.disk_formats:
+                    # If we don't have gpt defined, we don't expect this to
+                    # work because glance has not been updated for GPT
+                    # FIXME(danms): Remove this once glance support for GPT is
+                    # landed on master
+                    self.skipTest('GPT not configured and glance is too '
+                                  'old to support it')
+                raise
         else:
             self.skipTest(
                 'Glance does not currently reject unusable images on upload')
@@ -134,15 +138,23 @@
             # the import even starts until glance has it in its API
             # schema as a valid value. Other formats expected to fail
             # do so during import and return to queued state.
-            if self.imgdef['format'] not in CONF.image.disk_formats:
-                self.assertRaises(lib_exc.BadRequest,
-                                  self._test_image,
-                                  self.imgdef, asimport=True)
-            else:
+            try:
                 image = self._test_image(self.imgdef, asimport=True)
                 waiters.wait_for_image_status(self.client, image['id'],
-                                              'queued')
+                                              ['queued', 'active'])
                 self.client.delete_image(image['id'])
+            except lib_exc.BadRequest:
+                format = self.imgdef['format']
+                if format == 'gpt' and format not in CONF.image.disk_formats:
+                    # If we don't have gpt defined, we don't expect this to
+                    # work because glance has not been updated for GPT
+                    # FIXME(danms): Remove this once glance support for GPT is
+                    # landed on master
+                    self.skipTest('GPT not configured and glance is too '
+                                  'old to support it')
+                elif format in CONF.image.disk_formats:
+                    # This format is in our config, so it is supposed to
+                    # work; re-raise the failure.
+                    raise
 
         if self.imgdef['format'] == 'iso':
             # NOTE(danms): Glance has a special case to not convert ISO images
diff --git a/tempest/config.py b/tempest/config.py
index 9b2cea4..9c288ff 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -540,12 +540,8 @@
                      'be same as nova.conf: vnc.enabled'),
     cfg.BoolOpt('spice_console',
                 default=False,
-                help='Enable Spice console. This configuration value should '
-                     'be same as nova.conf: spice.enabled',
-                deprecated_for_removal=True,
-                deprecated_reason="This config option is not being used "
-                                  "in Tempest, we can add it back when "
-                                  "adding the test cases."),
+                help='Enable SPICE console. This configuration value should '
+                     'be same as nova.conf: spice.enabled'),
     cfg.BoolOpt('serial_console',
                 default=False,
                 help='Enable serial console. This configuration value '
@@ -586,10 +582,13 @@
                 default=True,
                 help='Enable special configuration drive with metadata.'),
     cfg.ListOpt('scheduler_enabled_filters',
-                default=["AvailabilityZoneFilter", "ComputeFilter",
-                         "ComputeCapabilitiesFilter", "ImagePropertiesFilter",
-                         "ServerGroupAntiAffinityFilter",
-                         "ServerGroupAffinityFilter"],
+                default=[
+                    "ComputeFilter",
+                    "ComputeCapabilitiesFilter",
+                    "ImagePropertiesFilter",
+                    "ServerGroupAntiAffinityFilter",
+                    "ServerGroupAffinityFilter",
+                ],
                 help="A list of enabled filters that Nova will accept as "
                      "hints to the scheduler when creating a server. If the "
                      "default value is overridden in nova.conf by the test "
@@ -1121,8 +1120,9 @@
                help='Volume types used for data volumes. Multiple volume '
                     'types can be assigned.'),
     cfg.BoolOpt('enable_volume_image_dep_tests',
+                deprecated_name='volume_image_dep_tests',
                 default=True,
-                help='Run tests for dependencies between images, volumes'
+                help='Run tests for dependencies between images, volumes '
                 'and instance snapshots')
 ]
 
diff --git a/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py b/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
index b36c9d6..d5c9b95 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
@@ -127,3 +127,16 @@
         {'type': 'null'}
     ]
 }
+
+server_id = {
+    'type': 'string', 'format': 'uuid'
+}
+
+name = {
+    # NOTE: Nova v2.1 API contains some 'name' parameters such
+    # as server, flavor, aggregate and so on. They are
+    # stored in the DB and Nova specific parameters.
+    # This definition is used for all their parameters.
+    'type': 'string', 'minLength': 1, 'maxLength': 255,
+    'format': 'name'
+}
diff --git a/tempest/serial_tests/api/admin/__init__.py b/tempest/lib/api_schema/response/compute/v2_100/__init__.py
similarity index 100%
copy from tempest/serial_tests/api/admin/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_100/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_100/servers.py b/tempest/lib/api_schema/response/compute/v2_100/servers.py
new file mode 100644
index 0000000..8721387
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_100/servers.py
@@ -0,0 +1,129 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_99 import servers as servers299
+
+###########################################################################
+#
+# 2.100:
+#
+# The scheduler_hints parameter is now returned in the response body
+# of the following calls:
+# - GET /servers/detail
+# - GET /servers/{server_id}
+# - POST /servers/{server_id}/action (rebuild)
+# - PUT /servers/{server_id}
+#
+###########################################################################
+
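+# As a purely illustrative sketch (the values below are hypothetical, not
+# taken from any real deployment), a server body at this microversion may
+# now carry e.g.:
+#
+#     "scheduler_hints": {
+#         "group": "0fd77252-4eef-4ec4-ae9b-e05dfc98aeac",
+#         "different_host": ["8c19174f-4220-44f0-824a-cd1eeef10287"]
+#     }
+#
+# which is validated by the '_hints' schema below.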
+_hints = {
+    'type': 'object',
+    'properties': {
+        'group': {
+            'type': 'string',
+            'format': 'uuid'
+        },
+        'different_host': {
+            # NOTE: The value of 'different_host' is the set of server
+            # uuids where a new server is scheduled on a different host.
+            # A user can specify a single server as a string parameter, but
+            # should specify multiple servers as an array parameter instead.
+            'oneOf': [
+                {
+                    'type': 'string',
+                    'format': 'uuid'
+                },
+                {
+                    'type': 'array',
+                    'items': parameter_types.server_id
+                }
+            ]
+        },
+        'same_host': {
+            # NOTE: The value of 'same_host' is the set of server
+            # uuids where a new server is scheduled on the same host.
+            'type': ['string', 'array'],
+            'items': parameter_types.server_id
+        },
+        'query': {
+            # NOTE: The value of 'query' is converted to dict data with
+            # jsonutils.loads() and used for filtering hosts.
+            'type': ['string', 'object'],
+        },
+        # NOTE: The value of 'target_cell' is the name of the cell
+        # a new server is scheduled on.
+        'target_cell': parameter_types.name,
+        'different_cell': {
+            'type': ['string', 'array'],
+            'items': {
+                'type': 'string'
+            }
+        },
+        'build_near_host_ip': parameter_types.ip_address,
+        'cidr': {
+            'type': 'string',
+            'pattern': '^/[0-9a-f.:]+$'
+        },
+    },
+    # NOTE: As this mail:
+    # http://lists.openstack.org/pipermail/openstack-dev/2015-June/067996.html
+    # pointed out, limiting the scheduler hints in the API is problematic,
+    # so it is relaxed here.
+    'additionalProperties': True
+}
+
+get_server = copy.deepcopy(servers299.get_server)
+get_server['response_body']['properties']['server'][
+    'properties'].update({'scheduler_hints': _hints})
+get_server['response_body']['properties']['server'][
+    'required'].append('scheduler_hints')
+
+list_servers_detail = copy.deepcopy(servers299.list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+    'properties'].update({'scheduler_hints': _hints})
+list_servers_detail['response_body']['properties']['servers']['items'][
+    'required'].append('scheduler_hints')
+
+rebuild_server = copy.deepcopy(servers299.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+    'properties'].update({'scheduler_hints': _hints})
+rebuild_server['response_body']['properties']['server'][
+    'required'].append('scheduler_hints')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers299.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'properties'].update({'scheduler_hints': _hints})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'required'].append('scheduler_hints')
+
+update_server = copy.deepcopy(servers299.update_server)
+update_server['response_body']['properties']['server'][
+    'properties'].update({'scheduler_hints': _hints})
+update_server['response_body']['properties']['server'][
+    'required'].append('scheduler_hints')
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We
+# need to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.99***
+attach_volume = copy.deepcopy(servers299.attach_volume)
+show_volume_attachment = copy.deepcopy(servers299.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers299.list_volume_attachments)
+list_servers = copy.deepcopy(servers299.list_servers)
+show_server_diagnostics = copy.deepcopy(servers299.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers299.get_remote_consoles)
+show_instance_action = copy.deepcopy(servers299.show_instance_action)
+create_backup = copy.deepcopy(servers299.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_6/servers.py b/tempest/lib/api_schema/response/compute/v2_6/servers.py
index e6b2c32..05ab616 100644
--- a/tempest/lib/api_schema/response/compute/v2_6/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_6/servers.py
@@ -46,7 +46,8 @@
                 'properties': {
                     'protocol': {'enum': ['vnc', 'rdp', 'serial', 'spice']},
                     'type': {'enum': ['novnc', 'xvpvnc', 'rdp-html5',
-                                      'spice-html5', 'serial']},
+                                      'spice-html5',
+                                      'serial']},
                     'url': {
                         'type': 'string',
                         'format': 'uri'
diff --git a/tempest/lib/api_schema/response/compute/v2_96/servers.py b/tempest/lib/api_schema/response/compute/v2_96/servers.py
index 7036a11..8a4ed9f 100644
--- a/tempest/lib/api_schema/response/compute/v2_96/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_96/servers.py
@@ -26,17 +26,45 @@
 #
 # - GET /servers/detail
 # - GET /servers/{server_id}
+# - PUT /servers/{server_id}
+# - POST /servers/{server_id}/action (rebuild)
 ###########################################################################
 
 get_server = copy.deepcopy(servers289.get_server)
 get_server['response_body']['properties']['server'][
     'properties'].update(
         {'pinned_availability_zone': {'type': ['string', 'null']}})
+get_server['response_body']['properties']['server'][
+    'required'].append('pinned_availability_zone')
 
 list_servers_detail = copy.deepcopy(servers289.list_servers_detail)
 list_servers_detail['response_body']['properties']['servers']['items'][
     'properties'].update(
         {'pinned_availability_zone': {'type': ['string', 'null']}})
+list_servers_detail['response_body']['properties']['servers']['items'][
+    'required'].append('pinned_availability_zone')
+
+update_server = copy.deepcopy(servers289.update_server)
+update_server['response_body']['properties']['server'][
+    'properties'].update(
+        {'pinned_availability_zone': {'type': ['string', 'null']}})
+update_server['response_body']['properties']['server'][
+    'required'].append('pinned_availability_zone')
+
+rebuild_server = copy.deepcopy(servers289.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+    'properties'].update(
+        {'pinned_availability_zone': {'type': ['string', 'null']}})
+rebuild_server['response_body']['properties']['server'][
+    'required'].append('pinned_availability_zone')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers289.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'properties'].update(
+        {'pinned_availability_zone': {'type': ['string', 'null']}})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'required'].append('pinned_availability_zone')
 
 # NOTE(zhufl): Below are the unchanged schema in this microversion. We
 # need to keep this schema in this file to have the generic way to select the
@@ -45,10 +73,6 @@
 attach_volume = copy.deepcopy(servers289.attach_volume)
 show_volume_attachment = copy.deepcopy(servers289.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers289.list_volume_attachments)
-rebuild_server = copy.deepcopy(servers289.rebuild_server)
-rebuild_server_with_admin_pass = copy.deepcopy(
-    servers289.rebuild_server_with_admin_pass)
-update_server = copy.deepcopy(servers289.update_server)
 list_servers = copy.deepcopy(servers289.list_servers)
 show_server_diagnostics = copy.deepcopy(servers289.show_server_diagnostics)
 get_remote_consoles = copy.deepcopy(servers289.get_remote_consoles)
diff --git a/tempest/serial_tests/api/admin/__init__.py b/tempest/lib/api_schema/response/compute/v2_98/__init__.py
similarity index 100%
copy from tempest/serial_tests/api/admin/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_98/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_98/servers.py b/tempest/lib/api_schema/response/compute/v2_98/servers.py
new file mode 100644
index 0000000..2fca3eb
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_98/servers.py
@@ -0,0 +1,85 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_96 import servers as servers296
+
+
+###########################################################################
+#
+# 2.98:
+#
+# The image properties parameter is now returned in the response body of the
+# following calls:
+#
+# - GET /servers/detail
+# - GET /servers/{server_id}
+# - PUT /servers/{server_id}
+# - POST /servers/{server_id}/action (rebuild)
+#
+###########################################################################
+
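+# As a purely illustrative sketch (the property names and values are
+# hypothetical), the 'image' entry of a server body at this microversion may
+# now look like:
+#
+#     "image": {"id": "...", "links": [...],
+#               "properties": {"hw_machine_type": "q35",
+#                              "hw_firmware_type": null}}
+#
+# the 'properties' part of which is validated by the schema below.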
+image_properties = {
+    'type': 'object',
+    'patternProperties': {
+        '^[a-zA-Z0-9_:. ]{1,255}$': {
+            'oneOf': [
+                {'type': 'string', 'maxLength': 255},
+                {'type': 'null'},
+            ]
+        },
+    },
+    'additionalProperties': False,
+}
+
+get_server = copy.deepcopy(servers296.get_server)
+get_server['response_body']['properties']['server']['properties'][
+    'image']['oneOf'][0]['properties'].update({'properties': image_properties})
+
+list_servers_detail = copy.deepcopy(servers296.list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+    'properties']['image']['oneOf'][0]['properties'].update(
+        {'properties': image_properties})
+
+update_server = copy.deepcopy(servers296.update_server)
+update_server['response_body']['properties']['server']['properties'][
+    'image']['oneOf'][0]['properties'].update({'properties': image_properties})
+
+rebuild_server = copy.deepcopy(servers296.rebuild_server)
+rebuild_server['response_body']['properties']['server']['properties'][
+    'image']['oneOf'][0]['properties'].update({'properties': image_properties})
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers296.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'properties']['image']['oneOf'][0]['properties'].update(
+        {'properties': image_properties})
+
+# Below are the unchanged schema in this microversion. We need to keep this
+# schema in this file to have the generic way to select the right schema based
+# on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.96***
+attach_volume = copy.deepcopy(servers296.attach_volume)
+show_volume_attachment = copy.deepcopy(servers296.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers296.list_volume_attachments)
+list_servers = copy.deepcopy(servers296.list_servers)
+show_server_diagnostics = copy.deepcopy(servers296.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers296.get_remote_consoles)
+list_tags = copy.deepcopy(servers296.list_tags)
+update_all_tags = copy.deepcopy(servers296.update_all_tags)
+delete_all_tags = copy.deepcopy(servers296.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers296.check_tag_existence)
+update_tag = copy.deepcopy(servers296.update_tag)
+delete_tag = copy.deepcopy(servers296.delete_tag)
+show_instance_action = copy.deepcopy(servers296.show_instance_action)
+create_backup = copy.deepcopy(servers296.create_backup)
diff --git a/tempest/serial_tests/api/admin/__init__.py b/tempest/lib/api_schema/response/compute/v2_99/__init__.py
similarity index 100%
copy from tempest/serial_tests/api/admin/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_99/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_99/servers.py b/tempest/lib/api_schema/response/compute/v2_99/servers.py
new file mode 100644
index 0000000..e667321
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_99/servers.py
@@ -0,0 +1,78 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_98 import servers
+
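+###########################################################################
+#
+# 2.99:
+#
+# Microversion 2.99 adds the 'spice-direct' remote console type and exposes
+# SPICE connection details (including 'tls_port') via
+# GET /os-console-auth-tokens/{console_token}; the get_remote_consoles and
+# console_auth_tokens schemas below cover those responses.
+#
+###########################################################################
+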
+# NOTE: Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.98 ******
+list_servers = copy.deepcopy(servers.list_servers)
+get_server = copy.deepcopy(servers.get_server)
+list_servers_detail = copy.deepcopy(servers.list_servers_detail)
+update_server = copy.deepcopy(servers.update_server)
+rebuild_server = copy.deepcopy(servers.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers.rebuild_server_with_admin_pass)
+show_server_diagnostics = copy.deepcopy(servers.show_server_diagnostics)
+attach_volume = copy.deepcopy(servers.attach_volume)
+show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
+create_backup = copy.deepcopy(servers.create_backup)
+
+console_auth_tokens = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'console': {
+                'type': 'object',
+                'properties': {
+                    'instance_uuid': {'type': 'string'},
+                    'host': {'type': 'string'},
+                    'port': {'type': 'integer'},
+                    'tls_port': {'type': ['integer', 'null']},
+                    'internal_access_path': {'type': ['string', 'null']}
+                }
+            }
+        }
+    }
+}
+
+get_remote_consoles = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'remote_console': {
+                'type': 'object',
+                'properties': {
+                    'protocol': {'enum': ['vnc', 'rdp', 'serial', 'spice']},
+                    'type': {'enum': ['novnc', 'xvpvnc', 'rdp-html5',
+                                      'spice-html5', 'spice-direct',
+                                      'serial']},
+                    'url': {
+                        'type': 'string',
+                        'format': 'uri'
+                    }
+                },
+                'additionalProperties': False,
+                'required': ['protocol', 'type', 'url']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['remote_console']
+    }
+}
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index e91c87a..4a607a3 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -46,6 +46,10 @@
 from tempest.lib.api_schema.response.compute.v2_89 import servers as schemav289
 from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
 from tempest.lib.api_schema.response.compute.v2_96 import servers as schemav296
+from tempest.lib.api_schema.response.compute.v2_98 import servers as schemav298
+from tempest.lib.api_schema.response.compute.v2_99 import servers as schemav299
+from tempest.lib.api_schema.response.compute.v2_100 import \
+    servers as schemav2100  # noqa: H306
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
@@ -77,7 +81,11 @@
         {'min': '2.75', 'max': '2.78', 'schema': schemav275},
         {'min': '2.79', 'max': '2.88', 'schema': schemav279},
         {'min': '2.89', 'max': '2.95', 'schema': schemav289},
-        {'min': '2.96', 'max': None, 'schema': schemav296}]
+        {'min': '2.96', 'max': '2.97', 'schema': schemav296},
+        {'min': '2.98', 'max': '2.98', 'schema': schemav298},
+        {'min': '2.99', 'max': '2.99', 'schema': schemav299},
+        {'min': '2.100', 'max': None, 'schema': schemav2100},
+    ]
 
     def __init__(self, auth_provider, service, region,
                  enable_instance_password=True, **kwargs):
@@ -680,6 +688,19 @@
         self.validate_response(schema.get_remote_consoles, resp, body)
         return rest_client.ResponseBody(resp, body)
 
+    def get_console_auth_token_details(self, token):
+        """Turn a console auth token into hypervisor connection details.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/compute/#show-console-connection-information
+        """
+        resp, body = self.get('/os-console-auth-tokens/%s' % token)
+        body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
+        self.validate_response(schema.console_auth_tokens, resp, body)
+        return rest_client.ResponseBody(resp, body)
+
     def rescue_server(self, server_id, **kwargs):
         """Rescue the provided server.
 
diff --git a/tempest/scenario/test_instances_with_cinder_volumes.py b/tempest/scenario/test_instances_with_cinder_volumes.py
index 0ddbec1..3d43e0a 100644
--- a/tempest/scenario/test_instances_with_cinder_volumes.py
+++ b/tempest/scenario/test_instances_with_cinder_volumes.py
@@ -12,6 +12,8 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import time
+
 from oslo_log import log as logging
 
 from tempest.common import utils
@@ -52,10 +54,11 @@
                available compute nodes, up to CONF.compute.min_compute_nodes.
                Total number of volumes is equal to
                compute nodes * len(volume_types_for_data_volume)
-            6. Attach volumes to the instances
-            7. Assign floating IP to all instances
-            8. Configure security group for ssh access to all instances
-            9. Confirm ssh access to all instances
+            6. Assign floating IP to all instances
+            7. Configure security group for ssh access to all instances
+            8. Confirm ssh access to all instances
+            9. Attach volumes to the instances; fixup device mapping if
+               required
             10. Run write test to all volumes through ssh connection per
                 instance
             11. Clean up the sources, an instance, volumes, keypair and image
@@ -143,24 +146,10 @@
         start = 0
         end = len(volume_types)
         for server in servers:
-            attached_volumes = []
-
             # wait for server to become active
             waiters.wait_for_server_status(self.servers_client,
                                            server['id'], 'ACTIVE')
 
-            # attach volumes to the instances
-            for volume in created_volumes[start:end]:
-
-                # wait for volume to become available
-                waiters.wait_for_volume_resource_status(
-                    self.volumes_client, volume['id'], 'available')
-
-                attached_volume = self.nova_volume_attach(server, volume)
-                attached_volumes.append(attached_volume)
-                LOG.debug("Attached volume %s to server %s",
-                          attached_volume['id'], server['id'])
-
             # assign floating ip
             floating_ip = None
             if (CONF.network_feature_enabled.floating_ips and
@@ -181,15 +170,19 @@
                 server=server
             )
 
+            # attach volumes to the instances
+            attached_volumes = []
+            for volume in created_volumes[start:end]:
+                attached_volume, actual_dev = self._attach_fixup(
+                    server, volume)
+                attached_volumes.append((attached_volume, actual_dev))
+                LOG.debug("Attached volume %s to server %s",
+                          attached_volume['id'], server['id'])
+
             server_name = server['name'].split('-')[-1]
 
             # run write test on all volumes
-            for volume in attached_volumes:
-
-                # dev name volume['attachments'][0]['device'][5:] is like
-                # /dev/vdb, we need to remove /dev/ -> first 5 chars
-                dev_name = volume['attachments'][0]['device'][5:]
-
+            for volume, dev_name in attached_volumes:
                 mount_path = f"/mnt/{server_name}"
 
                 timestamp_before = self.create_timestamp(
@@ -216,3 +209,49 @@
 
             start += len(volume_types)
             end += len(volume_types)
+
+    def _attach_fixup(self, server, volume):
+        """Attach a volume to the server and update the device key with the
+        device actually created inside the guest.
+        """
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, volume['id'], 'available')
+
+        list_blks = "lsblk --nodeps --noheadings --output NAME"
+
+        blks_before = set(self.linux_client.exec_command(
+            list_blks).strip().splitlines())
+
+        attached_volume = self.nova_volume_attach(server, volume)
+        # dev name volume['attachments'][0]['device'][5:] is like
+        # /dev/vdb, we need to remove /dev/ -> first 5 chars
+        dev_name = attached_volume['attachments'][0]['device'][5:]
+
+        retry = 0
+        actual_dev = None
+        blks_now = set()
+        while retry < 4 and not actual_dev:
+            try:
+                blks_now = set(self.linux_client.exec_command(
+                    list_blks).strip().splitlines())
+                for blk_dev in (blks_now - blks_before):
+                    serial = self.linux_client.exec_command(
+                        f"cat /sys/block/{blk_dev}/serial")
+                    if serial == volume['id'][:len(serial)]:
+                        actual_dev = blk_dev
+                        break
+            except exceptions.SSHExecCommandFailed:
+                retry += 1
+                time.sleep(2 ** retry)
+
+        if not actual_dev and len(blks_now - blks_before):
+            LOG.warning("Detected new devices in guest but could not match any"
+                        f" of them with the volume {volume['id']}")
+
+        if actual_dev and dev_name != actual_dev:
+            LOG.info(
+                f"OpenStack mapping {volume['id']} to device {dev_name}" +
+                f" is actually {actual_dev} inside the guest")
+            dev_name = actual_dev
+
+        return attached_volume, dev_name
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index febc2f6..aaa39c9 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -36,6 +36,11 @@
         if not CONF.service_available.cinder:
             raise cls.skipException("Cinder is not available")
 
+    @classmethod
+    def setup_clients(cls):
+        super(TestVolumeBootPattern, cls).setup_clients()
+        cls.servers_client = cls.os_primary.servers_client
+
     def _delete_server(self, server):
         self.servers_client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.servers_client, server['id'])
@@ -133,6 +138,37 @@
                                         server=server_from_snapshot)
         self.assertEqual(timestamp, timestamp3)
 
+    @decorators.idempotent_id('e3f4f2fc-5c6a-4be6-9c54-aedfc0954da7')
+    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+                          'Cinder volume snapshots are disabled')
+    @utils.services('compute', 'volume', 'image')
+    def test_bootable_volume_snapshot_stop_start_instance(self):
+        # Step 1: Create a bootable volume from an image
+        volume = self.create_volume_from_image()
+
+        # Step 2: Boot an instance from the created volume
+        instance = self.boot_instance_from_resource(
+            source_id=volume['id'],
+            source_type='volume',
+            wait_until='SSHABLE'
+        )
+
+        # Step 3: Stop the instance
+        self.servers_client.stop_server(instance['id'])
+        waiters.wait_for_server_status(self.servers_client, instance['id'],
+                                       'SHUTOFF')
+
+        # Step 4: Create a snapshot of the bootable volume
+        self.create_volume_snapshot(volume['id'], force=True)
+
+        # Step 5: Start the instance and verify it returns to ACTIVE state
+        self.servers_client.start_server(instance['id'])
+        waiters.wait_for_server_status(self.servers_client, instance['id'],
+                                       'ACTIVE')
+
+        # Step 6: Verify console log
+        self.log_console_output([instance])
+
     @decorators.idempotent_id('05795fb2-b2a7-4c9f-8fac-ff25aedb1489')
     @decorators.attr(type='slow')
     @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
diff --git a/tempest/serial_tests/api/admin/__init__.py b/tempest/serial_tests/api/compute/__init__.py
similarity index 100%
copy from tempest/serial_tests/api/admin/__init__.py
copy to tempest/serial_tests/api/compute/__init__.py
diff --git a/tempest/serial_tests/api/admin/__init__.py b/tempest/serial_tests/api/compute/admin/__init__.py
similarity index 100%
rename from tempest/serial_tests/api/admin/__init__.py
rename to tempest/serial_tests/api/compute/admin/__init__.py
diff --git a/tempest/serial_tests/api/admin/test_aggregates.py b/tempest/serial_tests/api/compute/admin/test_aggregates.py
similarity index 100%
rename from tempest/serial_tests/api/admin/test_aggregates.py
rename to tempest/serial_tests/api/compute/admin/test_aggregates.py
diff --git a/tempest/serial_tests/api/compute/admin/test_server_affinity.py b/tempest/serial_tests/api/compute/admin/test_server_affinity.py
new file mode 100644
index 0000000..71738bc
--- /dev/null
+++ b/tempest/serial_tests/api/compute/admin/test_server_affinity.py
@@ -0,0 +1,252 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.compute import base
+from tempest.common import compute
+from tempest import config
+from tempest import exceptions
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+@decorators.serial
+class ServersAffinityTest(base.BaseV2ComputeAdminTest):
+    """Test creating servers without multi-create with scheduler_hints.
+
+    The server affinity tests in ServersOnMultiNodesTest use the /servers
+    multi-create API and therefore do not test affinity behavior when server
+    group members already exist on hosts.
+
+    These tests must be run in serial because they will be disabling compute
+    hosts in order to verify affinity behavior.
+    """
+    # 2.64 added 'policy' and 'rules' fields to POST /os-server-groups
+    min_microversion = '2.64'
+
+    @classmethod
+    def resource_setup(cls):
+        super().resource_setup()
+
+        # Disable all compute hosts except two.
+        services = cls.os_admin.services_client.list_services(
+            binary='nova-compute')['services']
+        num_extra = len(services) - 2
+        for i in range(num_extra):
+            service = services.pop()
+            cls.os_admin.services_client.update_service(
+                service['id'], status='disabled')
+        cls.services = {
+            service['host']: service['id'] for service in services}
+
+    @classmethod
+    def skip_checks(cls):
+        super().skip_checks()
+
+        if CONF.compute.min_compute_nodes < 2:
+            raise cls.skipException(
+                "Less than 2 compute nodes, skipping affinity tests.")
+
+    def _disable_compute_host(self, hostname):
+        service_id = self.services[hostname]
+        self.os_admin.services_client.update_service(
+            service_id, status='disabled')
+        self.addCleanup(
+            self.os_admin.services_client.update_service, service_id,
+            status='enabled')
+
+    def _create_server(self, **kwargs):
+        body, servers = compute.create_test_server(
+            self.os_primary, networks='none', **kwargs)
+        for server in servers:
+            self.addCleanup(self.delete_server, server['id'])
+        return body
+
+    def _create_server_group(self, **kwargs):
+        name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix,
+            name=self.__class__.__name__ + "-Server-Group")
+        group_id = self.server_groups_client.create_server_group(
+            name=name, **kwargs)['server_group']['id']
+        self.addCleanup(
+            self.server_groups_client.delete_server_group, group_id)
+        return group_id
+
+    def _create_server_in_group(self, group_id):
+        hints = {'group': group_id}
+        server = self._create_server(
+            scheduler_hints=hints, wait_until='ACTIVE')
+        # Assert the server is in the group.
+        server_group = self.server_groups_client.show_server_group(
+            group_id)['server_group']
+        self.assertIn(server['id'], server_group['members'])
+        return server
+
+    @decorators.attr(type='multinode')
+    @decorators.idempotent_id('28ef4c29-09db-40a8-aacd-dc5fa321f35e')
+    @testtools.skipUnless(
+        compute.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
+        'ServerGroupAffinityFilter is not available.')
+    def test_create_server_with_affinity(self):
+        group_id = self._create_server_group(policy='affinity')
+
+        # Create server1 in the group.
+        server1 = self._create_server_in_group(group_id)
+
+        # Create server2 in the group.
+        server2 = self._create_server_in_group(group_id)
+
+        # Servers should be on the same host.
+        self.assertEqual(
+            self.get_host_for_server(server1['id']),
+            self.get_host_for_server(server2['id']))
+
+    @decorators.attr(type='multinode')
+    @decorators.idempotent_id('3ac1ff4e-0fa2-4069-ae59-695cf829275b')
+    @testtools.skipUnless(
+        compute.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
+        'ServerGroupAffinityFilter is not available.')
+    def test_create_server_with_affinity_negative(self):
+        group_id = self._create_server_group(policy='affinity')
+
+        # Create server1 in the group.
+        server1 = self._create_server_in_group(group_id)
+
+        # Disable the compute host server1 is on.
+        self._disable_compute_host(self.get_host_for_server(server1['id']))
+
+        # Create server2 in the group. This should fail because affinity policy
+        # cannot be honored.
+        self.assertRaises(
+            exceptions.BuildErrorException,
+            self._create_server_in_group, group_id)
+
+    @decorators.attr(type='multinode')
+    @decorators.idempotent_id('99cf4819-479c-4176-a9a6-ad501f6fc4b7')
+    @testtools.skipUnless(
+        compute.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
+        'ServerGroupAffinityFilter is not available.')
+    def test_create_server_with_soft_affinity(self):
+        group_id = self._create_server_group(policy='soft-affinity')
+
+        # Create server1 in the group.
+        server1 = self._create_server_in_group(group_id)
+
+        # Disable the compute host server1 is on.
+        self._disable_compute_host(self.get_host_for_server(server1['id']))
+
+        # Create server2 in the group. This should succeed because soft
+        # affinity is best effort and scheduling should go ahead even if the
+        # policy cannot be honored.
+        server2 = self._create_server_in_group(group_id)
+
+        # Servers should be on different hosts.
+        self.assertNotEqual(
+            self.get_host_for_server(server1['id']),
+            self.get_host_for_server(server2['id']))
+
+    @decorators.attr(type='multinode')
+    @decorators.idempotent_id('475e9db0-5512-41cb-a6b2-4bd6fb3c7603')
+    @testtools.skipUnless(
+        compute.is_scheduler_filter_enabled("ServerGroupAntiAffinityFilter"),
+        'ServerGroupAntiAffinityFilter is not available.')
+    def test_create_server_with_anti_affinity(self):
+        group_id = self._create_server_group(policy='anti-affinity')
+
+        # Create server1 in the group.
+        server1 = self._create_server_in_group(group_id)
+
+        # Create server2 in the group.
+        server2 = self._create_server_in_group(group_id)
+
+        # Servers should be on different hosts.
+        self.assertNotEqual(
+            self.get_host_for_server(server1['id']),
+            self.get_host_for_server(server2['id']))
+
+    @decorators.attr(type='multinode')
+    @decorators.idempotent_id('c5e43585-0fdd-42a9-a525-2b99465c28df')
+    @testtools.skipUnless(
+        compute.is_scheduler_filter_enabled("ServerGroupAntiAffinityFilter"),
+        'ServerGroupAntiAffinityFilter is not available.')
+    def test_create_server_with_anti_affinity_negative(self):
+        group_id = self._create_server_group(policy='anti-affinity')
+
+        # Create server1 in the group.
+        server1 = self._create_server_in_group(group_id)
+
+        # Disable the compute host server1 is not on.
+        self._disable_compute_host(self.get_host_other_than(server1['id']))
+
+        # Create server2 in the group. This should fail because anti-affinity
+        # policy cannot be honored.
+        self.assertRaises(
+            exceptions.BuildErrorException,
+            self._create_server_in_group, group_id)
+
+    @decorators.attr(type='multinode')
+    @decorators.idempotent_id('88a8c3d4-c0e8-4873-ba6f-006004779f29')
+    @testtools.skipUnless(
+        compute.is_scheduler_filter_enabled("ServerGroupAntiAffinityFilter"),
+        'ServerGroupAntiAffinityFilter is not available.')
+    def test_create_server_with_anti_affinity_max_server_per_host(self):
+        group_id = self._create_server_group(
+            policy='anti-affinity', rules={'max_server_per_host': 2})
+
+        # Create server1 in the group.
+        server1 = self._create_server_in_group(group_id)
+
+        # Disable the compute host server1 is not on.
+        self._disable_compute_host(self.get_host_other_than(server1['id']))
+
+        # Create server2 in the group. This should succeed because we are
+        # allowing a maximum of two servers per compute host for anti-affinity.
+        server2 = self._create_server_in_group(group_id)
+
+        # Servers should be on the same host.
+        self.assertEqual(
+            self.get_host_for_server(server1['id']),
+            self.get_host_for_server(server2['id']))
+
+        # An attempt to create a third server in the group should fail because
+        # we have already reached our maximum allowed servers per compute host
+        # for anti-affinity.
+        self.assertRaises(
+            exceptions.BuildErrorException,
+            self._create_server_in_group, group_id)
+
+    @decorators.attr(type='multinode')
+    @decorators.idempotent_id('ef2bc189-5ecc-4a23-8c1b-0d70a9138a77')
+    @testtools.skipUnless(
+        compute.is_scheduler_filter_enabled("ServerGroupAntiAffinityFilter"),
+        'ServerGroupAntiAffinityFilter is not available.')
+    def test_create_server_with_soft_anti_affinity(self):
+        group_id = self._create_server_group(policy='soft-anti-affinity')
+
+        # Create server1 in the group.
+        server1 = self._create_server_in_group(group_id)
+
+        # Disable the compute host server1 is not on.
+        self._disable_compute_host(self.get_host_other_than(server1['id']))
+
+        # Create server2 in the group. This should succeed because soft
+        # anti-affinity is best effort and scheduling should go ahead even if
+        # the policy cannot be honored.
+        server2 = self._create_server_in_group(group_id)
+
+        # Servers should be on the same host.
+        self.assertEqual(
+            self.get_host_for_server(server1['id']),
+            self.get_host_for_server(server2['id']))
diff --git a/tempest/tests/cmd/test_cleanup.py b/tempest/tests/cmd/test_cleanup.py
index 3efc9bd..7f67328 100644
--- a/tempest/tests/cmd/test_cleanup.py
+++ b/tempest/tests/cmd/test_cleanup.py
@@ -23,7 +23,8 @@
 
     def test_load_json_saved_state(self):
         # instantiate "empty" TempestCleanup
-        c = cleanup.TempestCleanup(None, None, 'test')
+        app = mock.Mock()
+        c = cleanup.TempestCleanup(app, None, 'test')
         test_saved_json = 'tempest/tests/cmd/test_saved_state_json.json'
         with open(test_saved_json, 'r') as f:
             test_saved_json_content = json.load(f)
@@ -35,7 +36,8 @@
 
     def test_load_json_resource_list(self):
         # instantiate "empty" TempestCleanup
-        c = cleanup.TempestCleanup(None, None, 'test')
+        app = mock.Mock()
+        c = cleanup.TempestCleanup(app, None, 'test')
         test_resource_list = 'tempest/tests/cmd/test_resource_list.json'
         with open(test_resource_list, 'r') as f:
             test_resource_list_content = json.load(f)
@@ -49,7 +51,8 @@
     @mock.patch('tempest.cmd.cleanup.TempestCleanup.init')
     @mock.patch('tempest.cmd.cleanup.TempestCleanup._cleanup')
     def test_take_action_got_exception(self, mock_cleanup, mock_init):
-        c = cleanup.TempestCleanup(None, None, 'test')
+        app = mock.Mock()
+        c = cleanup.TempestCleanup(app, None, 'test')
         c.GOT_EXCEPTIONS.append('exception')
         mock_cleanup.return_value = True
         mock_init.return_value = True
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 2f21c2d..bd21e73 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -45,7 +45,7 @@
         # if things are working in latest and oldest it will work in between
         # stable branches also. If anything is breaking we will be catching
         # those in respective stable branch gate.
-        - tempest-full-2024-2:
+        - tempest-full-2025-1:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-2023-2:
             irrelevant-files: *tempest-irrelevant-files
@@ -201,12 +201,15 @@
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-2025-1
         - tempest-full-2024-2
         - tempest-full-2024-1
         - tempest-full-2023-2
+        - tempest-slow-2025-1
         - tempest-slow-2024-2
         - tempest-slow-2024-1
         - tempest-slow-2023-2
+        - tempest-full-2025-1-extra-tests
         - tempest-full-2024-2-extra-tests
         - tempest-full-2024-1-extra-tests
         - tempest-full-2023-2-extra-tests
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 5785ec6..6409ae3 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,11 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-2025-1
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-noble
+    override-checkout: stable/2025.1
+
+- job:
     name: tempest-full-2024-2
     parent: tempest-full-py3
     nodeset: openstack-single-node-jammy
@@ -18,6 +24,12 @@
     override-checkout: stable/2023.2
 
 - job:
+    name: tempest-full-2025-1-extra-tests
+    parent: tempest-extra-tests
+    nodeset: openstack-single-node-noble
+    override-checkout: stable/2025.1
+
+- job:
     name: tempest-full-2024-2-extra-tests
     parent: tempest-extra-tests
     nodeset: openstack-single-node-jammy
@@ -36,6 +48,12 @@
     override-checkout: stable/2023.2
 
 - job:
+    name: tempest-slow-2025-1
+    parent: tempest-slow-py3
+    nodeset: openstack-two-node-noble
+    override-checkout: stable/2025.1
+
+- job:
     name: tempest-slow-2024-2
     parent: tempest-slow-py3
     nodeset: openstack-two-node-jammy