Merge "Pass the global request ID as logging context"
diff --git a/bindep.txt b/bindep.txt
index efd3a10..7d34939 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -5,7 +5,6 @@
 libffi-devel [platform:rpm]
 gcc [platform:rpm]
 gcc [platform:dpkg]
-python-dev [platform:dpkg]
 python-devel [platform:rpm]
 python3-dev [platform:dpkg]
 python3-devel [platform:rpm]
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 4ca7f0d..f630578 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,9 +9,10 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* Yoga
+* Xena
+* Wallaby
 * Victoria
-* Ussuri
-* Train
 
 For older OpenStack Release:
 
@@ -32,6 +33,5 @@
 
 Tempest master supports the below python versions:
 
-* Python 3.6
-* Python 3.7
 * Python 3.8
+* Python 3.9
diff --git a/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml b/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml
new file mode 100644
index 0000000..ec4e2f2
--- /dev/null
+++ b/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    Python 3.6 and 3.7 support has been dropped. Last release of Tempest
+    to support python 3.6 and 3.7 is Tempest 30.0.0. The minimum version
+    of Python now supported by Tempest is Python 3.8.
diff --git a/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml b/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml
new file mode 100644
index 0000000..c644e3a
--- /dev/null
+++ b/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Zed development cycle to
+    mark the end of support for EM Victoria release in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Yoga
+    * Xena
+    * Wallaby
+
+    Current development of Tempest is for OpenStack Zed development
+    cycle.
diff --git a/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml b/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml
new file mode 100644
index 0000000..9f4abd1
--- /dev/null
+++ b/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml
@@ -0,0 +1,9 @@
+---
+features:
+  - |
+    Added new module net_downtime including the fixture NetDowntimeMeter that
+    can be used to measure how long the connectivity with an IP is lost
+    during certain operations like a server live migration.
+    The configuration option allowed_network_downtime has been added with a
+    default value of 5.0 seconds, which would be the maximum time that
+    the connectivity downtime is expected to last.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 122f7c7..e1e6597 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
    :maxdepth: 1
 
    unreleased
+   v31.0.0
    v30.0.0
    v29.2.0
    v29.1.0
diff --git a/releasenotes/source/v31.0.0.rst b/releasenotes/source/v31.0.0.rst
new file mode 100644
index 0000000..8fb797c
--- /dev/null
+++ b/releasenotes/source/v31.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v31.0.0 Release Notes
+=====================
+.. release-notes:: 31.0.0 Release Notes
+   :version: 31.0.0
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 1919393..d9f855a 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -81,7 +81,7 @@
 .. zuul:rolevar:: stable_constraints_file
    :default: ''
 
-   Upper constraints file to be used for stable branch till stable/train.
+   Upper constraints file to be used for stable branch till stable/victoria.
 
 .. zuul:rolevar:: tempest_tox_environment
    :default: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 397de1e..f302fa5 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,11 +25,11 @@
     target_branch: "{{ zuul.override_checkout }}"
   when: zuul.override_checkout is defined
 
-- name: Use stable branch upper-constraints till stable/train
+- name: Use stable branch upper-constraints till stable/victoria
   set_fact:
     # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
-  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train"]
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "stable/victoria"]
 
 - name: Use Configured upper-constraints for non-master Tempest
   set_fact:
@@ -78,16 +78,16 @@
         exclude_list_option: "--exclude-list={{ tempest_test_exclude_list|quote }}"
       when: exclude_list_stat.stat.exists
 
-- name: stable/train workaround to fallback exclude-list to blacklist
-  # NOTE(gmann): stable/train use Tempest 26.1.0 and with stestr 2.5.1
-  # (beacause of upper constraints of stestr 2.5.1 in stable/train) which
-  # does not have new args exclude-list so let's fallback to old arg
-  # if new arg is passed.
+- name: Tempest 26.1.0 workaround to fallback exclude-list to blacklist
+  # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with
+  # stestr 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1
+  # in stable/train|ussuri|victoria) which does not have new args exclude-list
+  # so let's fallback to old arg if new arg is passed.
   set_fact:
     exclude_list_option: "--blacklist-file={{ tempest_test_exclude_list|quote }}"
   when:
     - tempest_test_exclude_list is defined
-    - target_branch == "stable/train"
+    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
 
 # TODO(kopecmartin) remove this after all consumers of the role have switched
 # to tempest_exclude_regex option, until then it's kept here for the backward
@@ -105,19 +105,19 @@
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
-    - target_branch != "stable/train"
+    - target_branch not in ["stable/train", "stable/ussuri", "stable/victoria"]
 
-- name: stable/train workaround to fallback exclude-regex to black-regex
-  # NOTE(gmann): stable/train use Tempest 26.1.0 and with stestr 2.5.1
-  # (beacause of upper constraints of stestr 2.5.1 in stable/train) which
-  # does not have new args exclude-regex so let's fallback to old arg
-  # if new arg is passed.
+- name: Tempest 26.1.0 workaround to fallback exclude-regex to black-regex
+  # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with stestr
+  # 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1 in
+  # stable/train|ussuri|victoria) which does not have new args exclude-regex so
+  # let's fallback to old arg if new arg is passed.
   set_fact:
     tempest_test_exclude_regex: "--black-regex={{tempest_exclude_regex|quote}}"
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
-    - target_branch == "stable/train"
+    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
 
 - name: Run Tempest
   command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} \
diff --git a/setup.cfg b/setup.cfg
index a41eccf..a531eb4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,7 +6,7 @@
 author = OpenStack
 author_email = openstack-discuss@lists.openstack.org
 home_page = https://docs.openstack.org/tempest/latest/
-python_requires = >=3.6
+python_requires = >=3.8
 classifier =
     Intended Audience :: Information Technology
     Intended Audience :: System Administrators
@@ -15,8 +15,6 @@
     Operating System :: POSIX :: Linux
     Programming Language :: Python
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.6
-    Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
     Programming Language :: Python :: 3 :: Only
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index c91b557..0975702 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -34,11 +34,6 @@
 class LiveMigrationTestBase(base.BaseV2ComputeAdminTest):
     """Test live migration operations supported by admin user"""
 
-    # These tests don't attempt any SSH validation nor do they use
-    # floating IPs on the instance, so all we need is a network and
-    # a subnet so the instance being migrated has a single port, but
-    # we need that to make sure we are properly updating the port
-    # host bindings during the live migration.
     create_default_network = True
 
     @classmethod
@@ -104,6 +99,11 @@
     max_microversion = '2.24'
     block_migration = None
 
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(LiveMigrationTest, cls).setup_credentials()
+
     def _test_live_migration(self, state='ACTIVE', volume_backed=False):
         """Tests live migration between two hosts.
 
@@ -182,7 +182,12 @@
         attach volume. This differs from test_volume_backed_live_migration
         above that tests live-migration with only an attached volume.
         """
-        server = self.create_test_server(wait_until="ACTIVE")
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until="SSHABLE")
         server_id = server['id']
         if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
             # not to specify a host so that the scheduler will pick one
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index c1236a7..7da87c7 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -26,6 +26,11 @@
     create_default_network = True
 
     @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(TestVolumeSwapBase, cls).setup_credentials()
+
+    @classmethod
     def skip_checks(cls):
         super(TestVolumeSwapBase, cls).skip_checks()
         if not CONF.compute_feature_enabled.swap_volume:
@@ -100,7 +105,16 @@
         volume1 = self.create_volume()
         volume2 = self.create_volume()
         # Boot server
-        server = self.create_test_server(wait_until='ACTIVE')
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guest to fully boot as the test
+        # will attach a volume to the server and therefore cleanup will try to
+        # detach it. See bug 1960346 for details.
+        server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
         # Attach "volume1" to server
         self.attach_volume(server, volume1)
         # Swap volume from "volume1" to "volume2"
@@ -200,9 +214,18 @@
         volume2 = self.create_volume(multiattach=True)
 
         # Create two servers and wait for them to be ACTIVE.
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guests to fully boot as the test
+        # will attach volumes to the servers and therefore cleanup will try to
+        # detach them. See bug 1960346 for details.
         reservation_id = self.create_test_server(
-            wait_until='ACTIVE', min_count=2,
-            return_reservation_id=True)['reservation_id']
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE',
+            min_count=2,
+            return_reservation_id=True,
+        )['reservation_id']
         # Get the servers using the reservation_id.
         servers = self.servers_client.list_servers(
             reservation_id=reservation_id)['servers']
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 10d522b..91ab09e 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -28,21 +28,22 @@
     create_default_network = True
 
     @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(VolumesAdminNegativeTest, cls).setup_credentials()
+
+    @classmethod
     def skip_checks(cls):
         super(VolumesAdminNegativeTest, cls).skip_checks()
         if not CONF.service_available.cinder:
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
 
-    @classmethod
-    def resource_setup(cls):
-        super(VolumesAdminNegativeTest, cls).resource_setup()
-        cls.server = cls.create_test_server(wait_until='ACTIVE')
-
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
     def test_update_attached_volume_with_nonexistent_volume_in_uri(self):
         """Test swapping non existent volume should fail"""
+        self.server = self.create_test_server(wait_until="ACTIVE")
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
@@ -55,6 +56,17 @@
     @decorators.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
     def test_update_attached_volume_with_nonexistent_volume_in_body(self):
         """Test swapping volume to a non existence volume should fail"""
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guest to fully boot as
+        # test_update_attached_volume_with_nonexistent_volume_in_body case
+        # will attach a volume to it and therefore cleanup will try to detach
+        # it. See bug 1960346 for details.
+        self.server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until="SSHABLE")
+
         volume = self.create_volume()
         self.attach_volume(self.server, volume)
 
@@ -76,6 +88,13 @@
     min_microversion = '2.60'
     volume_min_microversion = '3.27'
 
+    create_default_network = True
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(UpdateMultiattachVolumeNegativeTest, cls).setup_credentials()
+
     @classmethod
     def skip_checks(cls):
         super(UpdateMultiattachVolumeNegativeTest, cls).skip_checks()
@@ -101,8 +120,21 @@
         vol2 = self.create_volume(multiattach=True)
 
         # Create two instances.
-        server1 = self.create_test_server(wait_until='ACTIVE')
-        server2 = self.create_test_server(wait_until='ACTIVE')
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guests to fully boot as the test
+        # will attach volumes to the servers and therefore cleanup will try to
+        # detach them. See bug 1960346 for details.
+        server1 = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
+        server2 = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
 
         # Attach vol1 to both of these instances.
         vol1_attachment1 = self.attach_volume(server1, vol1)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index e16afaf..75df5ae 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -66,7 +66,9 @@
         # Setting network=True, subnet=True creates a default network
         cls.set_network_resources(
             network=cls.create_default_network,
-            subnet=cls.create_default_network)
+            subnet=cls.create_default_network,
+            router=cls.create_default_network,
+            dhcp=cls.create_default_network)
         super(BaseV2ComputeTest, cls).setup_credentials()
 
     @classmethod
@@ -412,7 +414,8 @@
         return image
 
     @classmethod
-    def recreate_server(cls, server_id, validatable=False, **kwargs):
+    def recreate_server(cls, server_id, validatable=False, wait_until='ACTIVE',
+                        **kwargs):
         """Destroy an existing class level server and creates a new one
 
         Some test classes use a test server that can be used by multiple
@@ -440,7 +443,7 @@
             validatable,
             validation_resources=cls.get_class_validation_resources(
                 cls.os_primary),
-            wait_until='ACTIVE',
+            wait_until=wait_until,
             adminPass=cls.password,
             **kwargs)
         return server['id']
@@ -455,15 +458,31 @@
         except Exception:
             LOG.exception('Failed to delete server %s', server_id)
 
-    def resize_server(self, server_id, new_flavor_id, **kwargs):
+    def resize_server(
+        self, server_id, new_flavor_id, wait_until='ACTIVE', **kwargs
+    ):
         """resize and confirm_resize an server, waits for it to be ACTIVE."""
         self.servers_client.resize_server(server_id, new_flavor_id, **kwargs)
         waiters.wait_for_server_status(self.servers_client, server_id,
                                        'VERIFY_RESIZE')
         self.servers_client.confirm_resize_server(server_id)
+
         waiters.wait_for_server_status(
             self.servers_client, server_id, 'ACTIVE')
         server = self.servers_client.show_server(server_id)['server']
+
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        if (
+            validation_resources and
+            wait_until in ("SSHABLE", "PINGABLE") and
+            CONF.validation.run_validation
+        ):
+            tenant_network = self.get_tenant_network()
+            compute.wait_for_ssh_or_ping(
+                server, self.os_primary, tenant_network,
+                True, validation_resources, wait_until, True)
+
         self.assert_flavor_equal(new_flavor_id, server['flavor'])
 
     def reboot_server(self, server_id, type):
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 870c6f5..bd4e0e8 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -65,12 +65,12 @@
             server = self.create_test_server(
                 validatable=True,
                 validation_resources=validation_resources,
-                wait_until='ACTIVE')
+                wait_until='SSHABLE')
             self.__class__.server_id = server['id']
         except Exception:
             # Rebuild server if something happened to it during a test
             self.__class__.server_id = self.recreate_server(
-                self.server_id, validatable=True)
+                self.server_id, validatable=True, wait_until='SSHABLE')
 
     def tearDown(self):
         super(ServerActionsTestJSON, self).tearDown()
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index e4ec209..5380c67 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -378,10 +378,19 @@
                   the created volume, and dict of server ID to volumeAttachment
                   dict entries
         """
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+
         servers = []
         for x in range(2):
             name = 'multiattach-server-%i' % x
-            servers.append(self.create_test_server(name=name))
+            servers.append(
+                self.create_test_server(
+                    name=name,
+                    validatable=True,
+                    validation_resources=validation_resources
+                )
+            )
 
         # Now wait for the servers to be ACTIVE.
         for server in servers:
@@ -492,7 +501,10 @@
         servers, volume, _ = self._create_and_multiattach()
 
         for server in servers:
-            self.resize_server(server['id'], self.flavor_ref_alt)
+            # We need to wait until the guest OS fully boots up as we are going
+            # to detach volumes after the resize. See bug #1960346.
+            self.resize_server(
+                server['id'], self.flavor_ref_alt, wait_until='SSHABLE')
 
         for server in servers:
             self._detach_multiattach_volume(volume['id'], server['id'])
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index e75e22a..4ca7412 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -78,7 +78,7 @@
 
         hmac_body = '%s\n%s\n%s' % (method, expires, path)
         sig = hmac.new(
-            key.encode(), hmac_body.encode(), hashlib.sha1
+            key.encode(), hmac_body.encode(), hashlib.sha256
         ).hexdigest()
 
         url = "%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (container,
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 4ad8428..e5f4cf2 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -83,7 +83,7 @@
 
         hmac_body = '%s\n%s\n%s' % (method, expires, path)
         sig = hmac.new(
-            key.encode(), hmac_body.encode(), hashlib.sha1
+            key.encode(), hmac_body.encode(), hashlib.sha256
         ).hexdigest()
 
         url = "%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (container,
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
index 3c76eca..9a85ed4 100644
--- a/tempest/api/volume/admin/test_backends_capabilities.py
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -47,6 +47,11 @@
                         'volume_backend_name',
                         'storage_protocol')
 
+        # List of storage protocols variants defined in cinder.common.constants
+        # The canonical name for storage protocol comes first in the list
+        VARIANTS = [['iSCSI', 'iscsi'], ['FC', 'fibre_channel', 'fc'],
+                    ['NFS', 'nfs'], ['NVMe-oF', 'NVMeOF', 'nvmeof']]
+
         # Get list backend capabilities using show_pools
         cinder_pools = [
             pool['capabilities'] for pool in
@@ -64,4 +69,23 @@
                                         cinder_pools)))
         observed_list = sorted(list(map(operator.itemgetter(*VOLUME_STATS),
                                         capabilities)))
+
+        # Cinder Bug #1966103: Some drivers were reporting different strings
+        # to represent the same storage protocol. For backward compatibility,
+        # the scheduler can handle the variants, but to standardize this for
+        # operators (who may need to refer to the protocol in volume-type
+        # extra-specs), the get-pools response was changed by I07d74078dbb1
+        # to only report the canonical name for a storage protocol. Thus, the
+        # expected_list (which we got from the get-pools call) will only
+        # contain canonical names, while the observed_list (which we got
+        # from the driver capabilities call) may contain a variant. So before
+        # comparing the lists, we need to look for known variants in the
+        # observed_list elements and replace them with their canonical values
+        for item in range(len(observed_list)):
+            for variants in VARIANTS:
+                if observed_list[item][2] in variants:
+                    observed_list[item] = (observed_list[item][0],
+                                           observed_list[item][1],
+                                           variants[0])
+
         self.assertEqual(expected_list, observed_list)
diff --git a/tempest/common/utils/net_downtime.py b/tempest/common/utils/net_downtime.py
new file mode 100644
index 0000000..9675ec8
--- /dev/null
+++ b/tempest/common/utils/net_downtime.py
@@ -0,0 +1,63 @@
+# Copyright 2022 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import signal
+import subprocess
+
+import fixtures
+
+from oslo_log import log
+
+
+LOG = log.getLogger(__name__)
+
+
+class NetDowntimeMeter(fixtures.Fixture):
+    def __init__(self, dest_ip, interval='0.2'):
+        self.dest_ip = dest_ip
+        # Note: for intervals lower than 0.2 ping requires root privileges
+        self.interval = interval
+        self.ping_process = None
+
+    def _setUp(self):
+        self.start_background_pinger()
+
+    def start_background_pinger(self):
+        cmd = ['ping', '-q', '-s1']
+        cmd.append('-i{}'.format(self.interval))
+        cmd.append(self.dest_ip)
+        LOG.debug("Starting background pinger to '{}' with interval {}".format(
+            self.dest_ip, self.interval))
+        self.ping_process = subprocess.Popen(
+            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        self.addCleanup(self.cleanup)
+
+    def cleanup(self):
+        if self.ping_process and self.ping_process.poll() is None:
+            LOG.debug('Terminating background pinger with pid {}'.format(
+                self.ping_process.pid))
+            self.ping_process.terminate()
+        self.ping_process = None
+
+    def get_downtime(self):
+        self.ping_process.send_signal(signal.SIGQUIT)
+        # Example of the expected output:
+        # 264/274 packets, 3% loss
+        output = self.ping_process.stderr.readline().strip().decode('utf-8')
+        if output and len(output.split()[0].split('/')) == 2:
+            succ, total = output.split()[0].split('/')
+            return (int(total) - int(succ)) * float(self.interval)
+        else:
+            LOG.warning('Unexpected output obtained from the pinger: %s',
+                        output)
diff --git a/tempest/config.py b/tempest/config.py
index ebde421..4098f32 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -965,6 +965,12 @@
                default='ecdsa',
                help='Type of key to use for ssh connections. '
                     'Valid types are rsa, ecdsa'),
+    cfg.FloatOpt('allowed_network_downtime',
+                 default=5.0,
+                 help="Allowed VM network connection downtime during live "
+                      "migration, in seconds. "
+                      "When the measured downtime exceeds this value, an "
+                      "exception is raised."),
 ]
 
 volume_group = cfg.OptGroup(name='volume',
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index db28487..ce45ff6 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -419,8 +419,12 @@
 
         body = self.backups_client.restore_backup(backup_id, **kwargs)
         restore = body['restore']
-        self.addCleanup(self.volumes_client.delete_volume,
-                        restore['volume_id'])
+
+        using_pre_existing_volume = kwargs.get('volume_id', False)
+        if not using_pre_existing_volume:
+            self.addCleanup(self.volumes_client.delete_volume,
+                            restore['volume_id'])
+
         waiters.wait_for_volume_resource_status(self.backups_client,
                                                 backup_id, 'available')
         waiters.wait_for_volume_resource_status(self.volumes_client,
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index b48ac3c..1c00212 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -15,7 +15,9 @@
 
 import testtools
 
+from oslo_log import log
 from tempest.common import utils
+from tempest.common.utils import net_downtime
 from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
@@ -23,6 +25,8 @@
 
 CONF = config.CONF
 
+LOG = log.getLogger(__name__)
+
 
 class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
     """Check VM connectivity after some advanced instance operations executed:
@@ -252,6 +256,11 @@
         block_migration = (CONF.compute_feature_enabled.
                            block_migration_for_live_migration)
         old_host = self.get_host_for_server(server['id'])
+
+        downtime_meter = net_downtime.NetDowntimeMeter(
+            floating_ip['floating_ip_address'])
+        self.useFixture(downtime_meter)
+
         self.admin_servers_client.live_migrate_server(
             server['id'], host=None, block_migration=block_migration,
             disk_over_commit=False)
@@ -261,6 +270,16 @@
         new_host = self.get_host_for_server(server['id'])
         self.assertNotEqual(old_host, new_host, 'Server did not migrate')
 
+        downtime = downtime_meter.get_downtime()
+        self.assertIsNotNone(downtime)
+        LOG.debug("Downtime seconds measured with downtime_meter = %r",
+                  downtime)
+        allowed_downtime = CONF.validation.allowed_network_downtime
+        self.assertLess(
+            downtime, allowed_downtime,
+            "Downtime of {} seconds is higher than expected '{}'".format(
+                downtime, allowed_downtime))
+
         self._wait_server_status_and_check_network_connectivity(
             server, keypair, floating_ip)
 
diff --git a/tempest/services/__init__.py b/tempest/services/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/services/__init__.py
+++ /dev/null
diff --git a/tempest/services/orchestration/__init__.py b/tempest/services/orchestration/__init__.py
deleted file mode 100644
index 5a1ffcc..0000000
--- a/tempest/services/orchestration/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.services.orchestration.json.orchestration_client import \
-    OrchestrationClient
-
-__all__ = ['OrchestrationClient']
diff --git a/tempest/services/orchestration/json/__init__.py b/tempest/services/orchestration/json/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/services/orchestration/json/__init__.py
+++ /dev/null
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
deleted file mode 100644
index 0d7720e..0000000
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ /dev/null
@@ -1,413 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-import time
-from urllib import parse as urllib
-
-from oslo_serialization import jsonutils as json
-
-from tempest import exceptions
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class OrchestrationClient(rest_client.RestClient):
-
-    def list_stacks(self, params=None):
-        """Lists all stacks for a user."""
-
-        uri = 'stacks'
-        if params:
-            uri += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_stack(self, name, disable_rollback=True, parameters=None,
-                     timeout_mins=60, template=None, template_url=None,
-                     environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        headers, body = self._prepare_update_create(
-            name,
-            disable_rollback,
-            parameters,
-            timeout_mins,
-            template,
-            template_url,
-            environment,
-            files)
-        uri = 'stacks'
-        resp, body = self.post(uri, headers=headers, body=body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_stack(self, stack_identifier, name, disable_rollback=True,
-                     parameters=None, timeout_mins=60, template=None,
-                     template_url=None, environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        headers, body = self._prepare_update_create(
-            name,
-            disable_rollback,
-            parameters,
-            timeout_mins,
-            template,
-            template_url,
-            environment)
-
-        uri = "stacks/%s" % stack_identifier
-        resp, body = self.put(uri, headers=headers, body=body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def _prepare_update_create(self, name, disable_rollback=True,
-                               parameters=None, timeout_mins=60,
-                               template=None, template_url=None,
-                               environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            "stack_name": name,
-            "disable_rollback": disable_rollback,
-            "parameters": parameters,
-            "timeout_mins": timeout_mins,
-            "template": "HeatTemplateFormatVersion: '2012-12-12'\n",
-            "environment": environment,
-            "files": files
-        }
-        if template:
-            post_body['template'] = template
-        if template_url:
-            post_body['template_url'] = template_url
-        body = json.dumps(post_body)
-
-        # Password must be provided on stack create so that heat
-        # can perform future operations on behalf of the user
-        headers = self.get_headers()
-        headers['X-Auth-Key'] = self.password
-        headers['X-Auth-User'] = self.user
-        return headers, body
-
-    def show_stack(self, stack_identifier):
-        """Returns the details of a single stack."""
-        url = "stacks/%s" % stack_identifier
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def suspend_stack(self, stack_identifier):
-        """Suspend a stack."""
-        url = 'stacks/%s/actions' % stack_identifier
-        body = {'suspend': None}
-        resp, body = self.post(url, json.dumps(body))
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def resume_stack(self, stack_identifier):
-        """Resume a stack."""
-        url = 'stacks/%s/actions' % stack_identifier
-        body = {'resume': None}
-        resp, body = self.post(url, json.dumps(body))
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def list_resources(self, stack_identifier):
-        """Returns the details of a single resource."""
-        url = "stacks/%s/resources" % stack_identifier
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_resource(self, stack_identifier, resource_name):
-        """Returns the details of a single resource."""
-        url = "stacks/%s/resources/%s" % (stack_identifier, resource_name)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_stack(self, stack_identifier):
-        """Deletes the specified Stack."""
-        resp, _ = self.delete("stacks/%s" % str(stack_identifier))
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def wait_for_stack_status(self, stack_identifier, status,
-                              failure_pattern='^.*_FAILED$'):
-        """Waits for a Stack to reach a given status."""
-        start = int(time.time())
-        fail_regexp = re.compile(failure_pattern)
-
-        while True:
-            try:
-                body = self.show_stack(stack_identifier)['stack']
-            except lib_exc.NotFound:
-                if status == 'DELETE_COMPLETE':
-                    return
-            stack_name = body['stack_name']
-            stack_status = body['stack_status']
-            if stack_status == status:
-                return body
-            if fail_regexp.search(stack_status):
-                raise exceptions.StackBuildErrorException(
-                    stack_identifier=stack_identifier,
-                    stack_status=stack_status,
-                    stack_status_reason=body['stack_status_reason'])
-
-            if int(time.time()) - start >= self.build_timeout:
-                message = ('Stack %s failed to reach %s status (current: %s) '
-                           'within the required time (%s s).' %
-                           (stack_name, status, stack_status,
-                            self.build_timeout))
-                raise lib_exc.TimeoutException(message)
-            time.sleep(self.build_interval)
-
-    def show_resource_metadata(self, stack_identifier, resource_name):
-        """Returns the resource's metadata."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}'
-               '/metadata'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_events(self, stack_identifier):
-        """Returns list of all events for a stack."""
-        url = 'stacks/{stack_identifier}/events'.format(**locals())
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_resource_events(self, stack_identifier, resource_name):
-        """Returns list of all events for a resource from stack."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}'
-               '/events'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_event(self, stack_identifier, resource_name, event_id):
-        """Returns the details of a single stack's event."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
-               '/{event_id}'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_template(self, stack_identifier):
-        """Returns the template for the stack."""
-        url = ('stacks/{stack_identifier}/template'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def _validate_template(self, post_body):
-        """Returns the validation request result."""
-        post_body = json.dumps(post_body)
-        resp, body = self.post('validate', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def validate_template(self, template, parameters=None):
-        """Returns the validation result for a template with parameters."""
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            'template': template,
-            'parameters': parameters,
-        }
-        return self._validate_template(post_body)
-
-    def validate_template_url(self, template_url, parameters=None):
-        """Returns the validation result for a template with parameters."""
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            'template_url': template_url,
-            'parameters': parameters,
-        }
-        return self._validate_template(post_body)
-
-    def list_resource_types(self):
-        """List resource types."""
-        resp, body = self.get('resource_types')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_resource_type(self, resource_type_name):
-        """Return the schema of a resource type."""
-        url = 'resource_types/%s' % resource_type_name
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, json.loads(body))
-
-    def show_resource_type_template(self, resource_type_name):
-        """Return the template of a resource type."""
-        url = 'resource_types/%s/template' % resource_type_name
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, json.loads(body))
-
-    def create_software_config(self, name=None, config=None, group=None,
-                               inputs=None, outputs=None, options=None):
-        headers, body = self._prep_software_config_create(
-            name, config, group, inputs, outputs, options)
-
-        url = 'software_configs'
-        resp, body = self.post(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_config(self, conf_id):
-        """Returns a software configuration resource."""
-        url = 'software_configs/%s' % str(conf_id)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_software_config(self, conf_id):
-        """Deletes a specific software configuration."""
-        url = 'software_configs/%s' % str(conf_id)
-        resp, _ = self.delete(url)
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def create_software_deploy(self, server_id=None, config_id=None,
-                               action=None, status=None,
-                               input_values=None, output_values=None,
-                               status_reason=None, signal_transport=None):
-        """Creates or updates a software deployment."""
-        headers, body = self._prep_software_deploy_update(
-            None, server_id, config_id, action, status, input_values,
-            output_values, status_reason, signal_transport)
-
-        url = 'software_deployments'
-        resp, body = self.post(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_software_deploy(self, deploy_id=None, server_id=None,
-                               config_id=None, action=None, status=None,
-                               input_values=None, output_values=None,
-                               status_reason=None, signal_transport=None):
-        """Creates or updates a software deployment."""
-        headers, body = self._prep_software_deploy_update(
-            deploy_id, server_id, config_id, action, status, input_values,
-            output_values, status_reason, signal_transport)
-
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, body = self.put(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_software_deployments(self):
-        """Returns a list of all deployments."""
-        url = 'software_deployments'
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_deployment(self, deploy_id):
-        """Returns a specific software deployment."""
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_deployment_metadata(self, server_id):
-        """Return a config metadata for a specific server."""
-        url = 'software_deployments/metadata/%s' % server_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_software_deploy(self, deploy_id):
-        """Deletes a specific software deployment."""
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, _ = self.delete(url)
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def _prep_software_config_create(self, name=None, conf=None, group=None,
-                                     inputs=None, outputs=None, options=None):
-        """Prepares a software configuration body."""
-        post_body = {}
-        if name is not None:
-            post_body["name"] = name
-        if conf is not None:
-            post_body["config"] = conf
-        if group is not None:
-            post_body["group"] = group
-        if inputs is not None:
-            post_body["inputs"] = inputs
-        if outputs is not None:
-            post_body["outputs"] = outputs
-        if options is not None:
-            post_body["options"] = options
-        body = json.dumps(post_body)
-
-        headers = self.get_headers()
-        return headers, body
-
-    def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
-                                     config_id=None, action=None, status=None,
-                                     input_values=None, output_values=None,
-                                     status_reason=None,
-                                     signal_transport=None):
-        """Prepares a deployment create or update (if an id was given)."""
-        post_body = {}
-
-        if deploy_id is not None:
-            post_body["id"] = deploy_id
-        if server_id is not None:
-            post_body["server_id"] = server_id
-        if config_id is not None:
-            post_body["config_id"] = config_id
-        if action is not None:
-            post_body["action"] = action
-        if status is not None:
-            post_body["status"] = status
-        if input_values is not None:
-            post_body["input_values"] = input_values
-        if output_values is not None:
-            post_body["output_values"] = output_values
-        if status_reason is not None:
-            post_body["status_reason"] = status_reason
-        if signal_transport is not None:
-            post_body["signal_transport"] = signal_transport
-        body = json.dumps(post_body)
-
-        headers = self.get_headers()
-        return headers, body
diff --git a/tox.ini b/tox.ini
index b07fdaf..94eb4d9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = pep8,py36,py39,bashate,pip-check-reqs
+envlist = pep8,py39,bashate,pip-check-reqs
 minversion = 3.18.0
 skipsdist = True
 ignore_basepython_conflict = True
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 3dd8c49..7535ccc 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -350,12 +350,18 @@
         - grenade-skip-level:
             voting: false
         - tempest-integrated-networking
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-networking
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-compute
@@ -380,12 +386,18 @@
         # centos-9-stream is tested from zed release onwards
         - tempest-integrated-compute-centos-9-stream:
             branches: ^(?!stable/(pike|queens|rocky|stein|train|ussuri|victoria|wallaby|xena|yoga)).*$
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - tempest-integrated-compute
         - tempest-integrated-compute-centos-9-stream
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-placement
@@ -400,12 +412,18 @@
         - grenade-skip-level:
             voting: false
         - tempest-integrated-placement
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-placement
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-storage
@@ -420,12 +438,18 @@
         - grenade-skip-level:
             voting: false
         - tempest-integrated-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-object-storage
@@ -438,9 +462,15 @@
       jobs:
         - grenade
         - tempest-integrated-object-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-object-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the issue below is fixed:
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 6ab7eed..0b34ae0 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -8,10 +8,9 @@
     check:
       jobs:
         - openstack-tox-pep8
-        - openstack-tox-py36
-        - openstack-tox-py37
         - openstack-tox-py38
         - openstack-tox-py39
+        - openstack-tox-py310
         - tempest-full-parallel:
             # Define list of irrelevant files to use everywhere else
             irrelevant-files: &tempest-irrelevant-files
@@ -41,8 +40,6 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-wallaby-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-victoria-py3:
-            irrelevant-files: *tempest-irrelevant-files
         - tempest-slow-wallaby:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
@@ -104,6 +101,9 @@
             voting: false
             irrelevant-files: *tempest-irrelevant-files
         - devstack-plugin-ceph-tempest-py3:
+            # TODO(kopecmartin): make it voting once the below bug is fixed
+            # https://bugs.launchpad.net/devstack-plugin-ceph/+bug/1975648
+            voting: false
             irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
@@ -123,14 +123,16 @@
         - openstack-tox-bashate:
             irrelevant-files: *tempest-irrelevant-files-2
         - tempest-full-centos-9-stream:
+            # TODO(gmann): make it voting once below fix is merged
+            # https://review.opendev.org/c/openstack/tempest/+/842140
+            voting: false
             irrelevant-files: *tempest-irrelevant-files
     gate:
       jobs:
         - openstack-tox-pep8
-        - openstack-tox-py36
-        - openstack-tox-py37
         - openstack-tox-py38
         - openstack-tox-py39
+        - openstack-tox-py310
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-grenade-multinode:
@@ -141,10 +143,10 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *tempest-irrelevant-files-3
-        - devstack-plugin-ceph-tempest-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-centos-9-stream:
-            irrelevant-files: *tempest-irrelevant-files
+        #- devstack-plugin-ceph-tempest-py3:
+        #    irrelevant-files: *tempest-irrelevant-files
+        #- tempest-full-centos-9-stream:
+        #    irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
         - tempest-with-latest-microversion
@@ -168,7 +170,6 @@
         - tempest-full-yoga
         - tempest-full-xena
         - tempest-full-wallaby-py3
-        - tempest-full-victoria-py3
         - tempest-slow-yoga
         - tempest-slow-xena
         - tempest-slow-wallaby
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 00b40f5..d1445c0 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -15,11 +15,6 @@
     override-checkout: stable/wallaby
 
 - job:
-    name: tempest-full-victoria-py3
-    parent: tempest-full-py3
-    override-checkout: stable/victoria
-
-- job:
     name: tempest-slow-yoga
     parent: tempest-slow-py3
     override-checkout: stable/yoga