Merge "Add doc for multinode attribute"
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index e8734e0..b723977 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -14,7 +14,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import contextlib
 import io
 import random
 import time
@@ -29,19 +28,7 @@
 
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def retry_bad_request(fn):
-    retries = 3
-    for i in range(retries):
-        try:
-            yield
-        except lib_exc.BadRequest:
-            if i < retries:
-                time.sleep(1)
-            else:
-                raise
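+# Number of retries for image updates that may transiently return BadRequest
+# (see _update_image_with_retries below).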
+BAD_REQUEST_RETRIES = 3
 
 
 class ImportImagesTest(base.BaseV2ImageTest):
@@ -832,14 +819,8 @@
         # Add a new location
         new_loc = {'metadata': {'foo': 'bar'},
                    'url': CONF.image.http_image}
-
-        # NOTE(danms): If glance was unable to fetch the remote image via
-        # HTTP, it will return BadRequest. Because this can be transient in
-        # CI, we try this a few times before we agree that it has failed
-        # for a reason worthy of failing the test.
-        with retry_bad_request():
-            self.client.update_image(image['id'], [
-                dict(add='/locations/-', value=new_loc)])
+        self._update_image_with_retries(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
 
         # The image should now be active, with one location that looks
         # like we expect
@@ -864,19 +845,29 @@
     def test_set_location(self):
         self._check_set_location()
 
+    def _update_image_with_retries(self, image, patch):
+        # NOTE(danms): If glance was unable to fetch the remote image via
+        # HTTP, it will return BadRequest. Because this can be transient in
+        # CI, we try this a few times before we agree that it has failed
+        # for a reason worthy of failing the test.
+        for i in range(BAD_REQUEST_RETRIES):
+            try:
+                self.client.update_image(image, patch)
+                break
+            except lib_exc.BadRequest:
+                if i + 1 == BAD_REQUEST_RETRIES:
+                    raise
+                else:
+                    time.sleep(1)
+
     def _check_set_multiple_locations(self):
         image = self._check_set_location()
 
         new_loc = {'metadata': {'speed': '88mph'},
                    'url': '%s#new' % CONF.image.http_image}
-
-        # NOTE(danms): If glance was unable to fetch the remote image via
-        # HTTP, it will return BadRequest. Because this can be transient in
-        # CI, we try this a few times before we agree that it has failed
-        # for a reason worthy of failing the test.
-        with retry_bad_request():
-            self.client.update_image(image['id'], [
-                dict(add='/locations/-', value=new_loc)])
+        self._update_image_with_retries(image['id'],
+                                        [dict(add='/locations/-',
+                                              value=new_loc)])
 
         # The image should now have two locations and the last one
         # (locations are ordered) should have the new URL.
@@ -988,8 +979,9 @@
                                        'os_hash_algo': 'sha512'},
                    'metadata': {},
                    'url': CONF.image.http_image}
-        self.client.update_image(image['id'], [
-            dict(add='/locations/-', value=new_loc)])
+        self._update_image_with_retries(image['id'],
+                                        [dict(add='/locations/-',
+                                              value=new_loc)])
 
         # Expect that all of our values ended up on the image
         image = self.client.show_image(image['id'])
@@ -1016,8 +1008,9 @@
                                 'os_hash_algo': orig_image['os_hash_algo']},
             'metadata': {},
             'url': '%s#new' % CONF.image.http_image}
-        self.client.update_image(orig_image['id'], [
-            dict(add='/locations/-', value=new_loc)])
+        self._update_image_with_retries(orig_image['id'],
+                                        [dict(add='/locations/-',
+                                              value=new_loc)])
 
         # Setting the same exact values on a new location should work
         image = self.client.show_image(orig_image['id'])
@@ -1051,17 +1044,17 @@
 
             # This should always fail due to the mismatch
             self.assertRaises(lib_exc.Conflict,
-                              self.client.update_image,
-                              orig_image['id'], [
-                                  dict(add='/locations/-', value=new_loc)])
+                              self._update_image_with_retries,
+                              orig_image['id'],
+                              [dict(add='/locations/-', value=new_loc)])
 
         # Now try to add a new location with all of the substitutions,
         # which should also fail
         new_loc['validation_data'] = values
         self.assertRaises(lib_exc.Conflict,
-                          self.client.update_image,
-                          orig_image['id'], [
-                              dict(add='/locations/-', value=new_loc)])
+                          self._update_image_with_retries,
+                          orig_image['id'],
+                          [dict(add='/locations/-', value=new_loc)])
 
         # Make sure nothing has changed on our image after all the
         # above failures
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 49f9e22..9ba9949 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -20,6 +20,7 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib.decorators import cleanup_order
+from tempest.lib import exceptions as lib_exc
 import tempest.test
 
 CONF = config.CONF
@@ -126,12 +127,32 @@
 
         volume = self.volumes_client.create_volume(**kwargs)['volume']
         self.cleanup(test_utils.call_and_ignore_notfound_exc,
-                     self.delete_volume, self.volumes_client, volume['id'])
+                     self._delete_volume_for_cleanup,
+                     self.volumes_client, volume['id'])
         if wait_until:
             waiters.wait_for_volume_resource_status(self.volumes_client,
                                                     volume['id'], wait_until)
         return volume
 
+    @staticmethod
+    def _delete_volume_for_cleanup(volumes_client, volume_id):
+        """Delete a volume (only) for cleanup.
+
+        If the volume is still attached to a server, wait for it to become
+        available, assuming we have already deleted the server and just need
+        nova to finish that delete before the volume can be deleted.
+        In either case, proceed to the regular delete_volume().
+        """
+        try:
+            vol = volumes_client.show_volume(volume_id)['volume']
+            if vol['status'] == 'in-use':
+                waiters.wait_for_volume_resource_status(volumes_client,
+                                                        volume_id,
+                                                        'available')
+        except lib_exc.NotFound:
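+            # The volume is already gone, so there is nothing to wait for.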
+            pass
+        BaseVolumeTest.delete_volume(volumes_client, volume_id)
+
     @cleanup_order
     def create_snapshot(self, volume_id=1, **kwargs):
         """Wrapper utility that returns a test snapshot."""
@@ -183,15 +204,17 @@
         snapshots_client.delete_snapshot(snapshot_id)
         snapshots_client.wait_for_resource_deletion(snapshot_id)
 
-    def attach_volume(self, server_id, volume_id):
+    def attach_volume(self, server_id, volume_id, wait_for_detach=True):
         """Attach a volume to a server"""
         self.servers_client.attach_volume(
             server_id, volumeId=volume_id,
             device='/dev/%s' % CONF.compute.volume_device_name)
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume_id, 'in-use')
-        self.addCleanup(waiters.wait_for_volume_resource_status,
-                        self.volumes_client, volume_id, 'available')
+        if wait_for_detach:
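+            # Pass the server and its client so the waiter can dump the
+            # server's console log if the volume never becomes available.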
+            self.addCleanup(waiters.wait_for_volume_resource_status,
+                            self.volumes_client, volume_id, 'available',
+                            server_id, self.servers_client)
         self.addCleanup(self.servers_client.detach_volume, server_id,
                         volume_id)
 
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index b3a04f8..95521e7 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -44,12 +44,17 @@
     @utils.services('compute')
     def test_snapshot_create_delete_with_volume_in_use(self):
         """Test create/delete snapshot from volume attached to server"""
-        # Create a test instance
-        server = self.create_server(wait_until='SSHABLE')
         # NOTE(zhufl) Here we create volume from self.image_ref for adding
         # coverage for "creating snapshot from non-blank volume".
         volume = self.create_volume(imageRef=self.image_ref)
-        self.attach_volume(server['id'], volume['id'])
+
+        # Create a test instance
+        server = self.create_server(wait_until='SSHABLE')
+
+        # NOTE(danms): We are attaching this volume to a server, but we do
+        # not need to block on detach during cleanup because we will be
+        # deleting the server anyway.
+        self.attach_volume(server['id'], volume['id'], wait_for_detach=False)
 
         # Snapshot a volume which attached to an instance with force=False
         self.assertRaises(lib_exc.BadRequest, self.create_snapshot,
@@ -81,7 +86,11 @@
 
         # Create a server and attach it
         server = self.create_server(wait_until='SSHABLE')
-        self.attach_volume(server['id'], self.volume_origin['id'])
+        # NOTE(danms): We are attaching this volume to a server, but we do
+        # not need to block on detach during cleanup because we will be
+        # deleting the server anyway.
+        self.attach_volume(server['id'], self.volume_origin['id'],
+                           wait_for_detach=False)
 
         # Now that the volume is attached, create other snapshots
         snapshot2 = self.create_snapshot(self.volume_origin['id'], force=True)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 45a7b8a..c5da412 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -303,12 +303,16 @@
     raise lib_exc.TimeoutException(message)
 
 
-def wait_for_volume_resource_status(client, resource_id, status):
+def wait_for_volume_resource_status(client, resource_id, status,
+                                    server_id=None, servers_client=None):
     """Waits for a volume resource to reach a given status.
 
     This function is a common function for volume, snapshot and backup
     resources. The function extracts the name of the desired resource from
     the client class name of the resource.
+
+    If server_id and servers_client are provided, dump that server's console
+    log if the wait times out.
     """
     resource_name = re.findall(
         r'(volume|group-snapshot|snapshot|backup|group)',
@@ -330,6 +334,11 @@
             raise exceptions.VolumeExtendErrorException(volume_id=resource_id)
 
         if int(time.time()) - start >= client.build_timeout:
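+            # Dump the guest console log (when a server is given) to help
+            # debug why the resource never reached the requested status.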
+            if server_id and servers_client:
+                console_output = servers_client.get_console_output(
+                    server_id)['output']
+                LOG.debug('Console output for %s\nbody=\n%s',
+                          server_id, console_output)
             message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
                        (resource_name, resource_id, status, resource_status,
diff --git a/tempest/config.py b/tempest/config.py
index 00b394e..dfc0a8e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -975,12 +975,12 @@
                default='ecdsa',
                help='Type of key to use for ssh connections. '
                     'Valid types are rsa, ecdsa'),
-    cfg.IntOpt('allowed_network_downtime',
-               default=5.0,
-               help="Allowed VM network connection downtime during live "
-                    "migration, in seconds. "
-                    "When the measured downtime exceeds this value, an "
-                    "exception is raised."),
+    cfg.FloatOpt('allowed_network_downtime',
+                 default=5.0,
+                 help="Allowed VM network connection downtime during live "
+                      "migration, in seconds. "
+                      "When the measured downtime exceeds this value, an "
+                      "exception is raised."),
 ]
 
 volume_group = cfg.OptGroup(name='volume',
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f4f37b0..e6c6eb6 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -275,7 +275,7 @@
         LOG.debug("Downtime seconds measured with downtime_meter = %r",
                   downtime)
         allowed_downtime = CONF.validation.allowed_network_downtime
-        self.assertLess(
+        self.assertLessEqual(
             downtime, allowed_downtime,
             "Downtime of {} seconds is higher than expected '{}'".format(
                 downtime, allowed_downtime))
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 2695048..93c949e 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -386,6 +386,29 @@
         mock_sleep.assert_called_once_with(1)
 
     @mock.patch.object(time, 'sleep')
+    def test_wait_for_volume_status_timeout_console(self, mock_sleep):
+        # Tests that the wait method gets the server console log if the
+        # timeout is hit.
+        client = mock.Mock(spec=volumes_client.VolumesClient,
+                           resource_type="volume",
+                           build_interval=1,
+                           build_timeout=1)
+        servers_client = mock.Mock()
+        servers_client.get_console_output.return_value = {
+            'output': 'console log'}
+        volume = {'volume': {'status': 'detaching'}}
+        mock_show = mock.Mock(return_value=volume)
+        client.show_volume = mock_show
+        volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_volume_resource_status,
+                          client, volume_id, 'available',
+                          server_id='someserver',
+                          servers_client=servers_client)
+        servers_client.get_console_output.assert_called_once_with(
+            'someserver')
+
+    @mock.patch.object(time, 'sleep')
     def test_wait_for_volume_status_error_extending(self, mock_sleep):
         # Tests that the wait method raises VolumeExtendErrorException if
         # the volume status is 'error_extending'.
diff --git a/tools/tempest-extra-tests-list.txt b/tools/tempest-extra-tests-list.txt
new file mode 100644
index 0000000..9c88109
--- /dev/null
+++ b/tools/tempest-extra-tests-list.txt
@@ -0,0 +1,20 @@
+# This file lists the tests which need to be excluded from
+# integrated testing (the tempest-full job or other generic
+# jobs). These tests are run in a separate job instead. This
+# is needed to avoid the job timeout; details are in
+# bug#2004780.
+# The basic criteria for adding a test to this list are:
+# * Admin tests which are not needed for interop; most of them
+#   already run as part of other API and scenario tests.
+# * Negative tests which are mostly covered by tempest API tests
+#   or by service unit/functional tests.
+
+# All admin tests except the keystone admin tests, which might not have
+# much coverage in other existing tests
+tempest.api.compute.admin
+tempest.api.volume.admin
+tempest.api.image.admin
+tempest.api.network.admin
+
+# All negative tests
+negative
diff --git a/tox.ini b/tox.ini
index 972c05e..47ef5eb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -126,17 +126,49 @@
     tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' {posargs}
     tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)|(^tempest\.serial_tests)' {posargs}
 
+[testenv:integrated-full]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regexes below are used to select which tests to run. They exclude the
+# extra tests mentioned in tools/tempest-extra-tests-list.txt and tests tagged slow:
+# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
+# FIXME: We can replace it with the `--exclude-regex` option to exclude tests now.
+regex1 = '(?!.*\[.*\bslow\b.*\])(^tempest\.api)'
+regex2 = '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)|(^tempest\.serial_tests)'
+commands =
+    find . -type f -name "*.pyc" -delete
+    tempest run --regex {[testenv:integrated-full]regex1} --exclude-list ./tools/tempest-extra-tests-list.txt {posargs}
+    tempest run --combine --serial --regex {[testenv:integrated-full]regex2} {posargs}
+
+[testenv:extra-tests]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select the extra tests mentioned in
+# tools/tempest-extra-tests-list.txt and to exclude tests tagged slow:
+# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
+# FIXME: We can replace it with the `--exclude-regex` option to exclude tests now.
+exclude-regex = '\[.*\bslow\b.*\]'
+commands =
+    find . -type f -name "*.pyc" -delete
+    tempest run --exclude-regex {[testenv:extra-tests]exclude-regex} --include-list ./tools/tempest-extra-tests-list.txt {posargs}
+
 [testenv:full-parallel]
 envdir = .tox/tempest
 sitepackages = {[tempestenv]sitepackages}
 basepython = {[tempestenv]basepython}
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
+# Exclude the extra tests mentioned in tools/tempest-extra-tests-list.txt
 regex = '(^tempest\.scenario.*)|(^tempest\.serial_tests)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)'
-# The regex below is used to select all tempest scenario and including the non slow api tests
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex {[testenv:full-parallel]regex} {posargs}
+    tempest run --regex {[testenv:full-parallel]regex} --exclude-list ./tools/tempest-extra-tests-list.txt {posargs}
 
 [testenv:api-microversion-tests]
 envdir = .tox/tempest
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index a67f6b4..f1e6c01 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -60,11 +60,24 @@
         c-bak: false
 
 - job:
+    name: tempest-extra-tests
+    parent: devstack-tempest
+    description: |
+      This job runs the extra tests mentioned in
+      tools/tempest-extra-tests-list.txt.
+    vars:
+      tox_envlist: extra-tests
+
+- job:
     name: tempest-full-py3
     parent: devstack-tempest
     # This job version is with swift enabled on py3
     # as swift is ready on py3 from stable/ussuri onwards.
-    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    # As this uses the 'integrated-full' tox env, which is not
+    # available in the older tempest used until stable/wallaby,
+    # this job definition is only for stable/xena onwards;
+    # a separate job definition covers branches up to stable/wallaby.
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria|wallaby)).*$
     description: |
       Base integration test with Neutron networking, horizon, swift enable,
       and py3.
@@ -74,7 +87,7 @@
     required-projects:
       - openstack/horizon
     vars:
-      tox_envlist: full
+      tox_envlist: integrated-full
       devstack_localrc:
         USE_PYTHON3: true
         FORCE_CONFIG_DRIVE: true
@@ -107,6 +120,7 @@
       # Required until bug/1949606 is resolved when using libvirt and QEMU
       # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
       configure_swap_size: 4096
+      tox_envlist: full
 
 - job:
     name: tempest-integrated-networking
@@ -246,10 +260,15 @@
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
         neutron-trunk: true
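+        # Enable network debugging helpers (tcpdump on br-ex and OVS flow
+        # dumps on br-int) to help diagnose multinode connectivity failures.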
+        br-ex-tcpdump: true
+        br-int-flows: true
     group-vars:
       subnode:
         devstack_localrc:
           USE_PYTHON3: true
+        devstack_services:
+          br-ex-tcpdump: true
+          br-int-flows: true
 
 - job:
     name: tempest-slow
@@ -375,6 +394,7 @@
         CINDER_ENFORCE_SCOPE: true
         GLANCE_ENFORCE_SCOPE: true
         NEUTRON_ENFORCE_SCOPE: true
+        PLACEMENT_ENFORCE_SCOPE: true
 
 - project-template:
     name: integrated-gate-networking
@@ -390,20 +410,20 @@
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
 
 - project-template:
     name: integrated-gate-compute
@@ -427,15 +447,15 @@
             branches: ^stable/(wallaby|xena|yoga).*$
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - tempest-integrated-compute
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     periodic-weekly:
       jobs:
         # centos-9-stream is tested from zed release onwards
@@ -457,20 +477,20 @@
         - tempest-integrated-placement
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/ussuri).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-placement
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
 
 - project-template:
     name: integrated-gate-storage
@@ -487,20 +507,20 @@
         - tempest-integrated-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
 
 - project-template:
     name: integrated-gate-object-storage
@@ -515,17 +535,17 @@
         - tempest-integrated-object-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-object-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 126119b..d20186e 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -26,6 +26,8 @@
               - ^.gitignore$
               - ^.gitreview$
               - ^.mailmap$
+        - tempest-extra-tests:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-ubuntu-focal:
             irrelevant-files: *tempest-irrelevant-files
         - glance-multistore-cinder-import:
@@ -35,11 +37,7 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-xena:
             irrelevant-files: *tempest-irrelevant-files
-        # Temporarily marked as n-v due to the below bug which blocks
-        # the CI and complicates merging of patches.
-        # https://bugs.launchpad.net/tempest/+bug/1998916
         - tempest-multinode-full-py3:
-            voting: false
             irrelevant-files: *tempest-irrelevant-files
         - tempest-tox-plugin-sanity-check:
             irrelevant-files: &tempest-irrelevant-files-2
@@ -63,6 +61,7 @@
               - ^tools/tempest-integrated-gate-placement-exclude-list.txt
               - ^tools/tempest-integrated-gate-storage-blacklist.txt
               - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+              - ^tools/tempest-extra-tests-list.txt
               - ^tools/verify-ipv6-only-deployments.sh
               - ^tools/with_venv.sh
               # tools/ is not here since this relies on a script in tools/.
@@ -86,6 +85,7 @@
               - ^tools/tempest-integrated-gate-placement-exclude-list.txt
               - ^tools/tempest-integrated-gate-storage-blacklist.txt
               - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+              - ^tools/tempest-extra-tests-list.txt
               - ^tools/tempest-plugin-sanity.sh
               - ^tools/with_venv.sh
               - ^.coveragerc$
@@ -131,13 +131,14 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-extra-tests:
+            irrelevant-files: *tempest-irrelevant-files
         - grenade:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *tempest-irrelevant-files-3
-        # https://bugs.launchpad.net/tempest/+bug/1998916
-        #- tempest-multinode-full-py3:
-        #    irrelevant-files: *tempest-irrelevant-files
+        - tempest-multinode-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-enforce-scope-new-defaults:
             irrelevant-files: *tempest-irrelevant-files
         #- devstack-plugin-ceph-tempest-py3:
@@ -147,6 +148,8 @@
     experimental:
       jobs:
         - nova-multi-cell
+        - nova-ceph-multistore:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-with-latest-microversion
         - tempest-stestr-master
         - tempest-cinder-v2-api:
@@ -177,6 +180,9 @@
         - tempest-slow-zed
         - tempest-slow-yoga
         - tempest-slow-xena
+        - tempest-full-zed-extra-tests
+        - tempest-full-yoga-extra-tests
+        - tempest-full-xena-extra-tests
     periodic:
       jobs:
         - tempest-all
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index fb2300b..8aeb748 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -18,6 +18,24 @@
     override-checkout: stable/xena
 
 - job:
+    name: tempest-full-zed-extra-tests
+    parent: tempest-extra-tests
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/zed
+
+- job:
+    name: tempest-full-yoga-extra-tests
+    parent: tempest-extra-tests
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/yoga
+
+- job:
+    name: tempest-full-xena-extra-tests
+    parent: tempest-extra-tests
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/xena
+
+- job:
     name: tempest-slow-zed
     parent: tempest-slow-py3
     nodeset: openstack-two-node-focal
@@ -38,6 +56,36 @@
 - job:
     name: tempest-full-py3
     parent: devstack-tempest
+    # This job version uses the 'full' tox env, which is the
+    # one available from stable/ussuri to stable/wallaby.
+    branches:
+      - stable/ussuri
+      - stable/victoria
+      - stable/wallaby
+    description: |
+      Base integration test with Neutron networking, horizon, swift enable,
+      and py3.
+      Former names for this job were:
+        * legacy-tempest-dsvm-py35
+        * gate-tempest-dsvm-py35
+    required-projects:
+      - openstack/horizon
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        # Enable horizon so that we can run the horizon tests.
+        horizon: true
+
+- job:
+    name: tempest-full-py3
+    parent: devstack-tempest
     # This job version is with swift disabled on py3
     # as swift was not ready on py3 until stable/train.
     branches:
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 684270a..972123e 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -30,6 +30,8 @@
       - opendev.org/openstack/oslo.utils
       - opendev.org/openstack/oslo.versionedobjects
       - opendev.org/openstack/oslo.vmware
+    vars:
+      tox_envlist: full
 
 - job:
     name: tempest-full-parallel