Merge "Reactivate image before deletion"
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index d56fb73..568077e 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -16,4 +16,17 @@
     # address is IPv6 etc. This is invoked before tests are run so that we can
     # fail early if anything missing the IPv6 settings or deployments.
     - devstack-ipv6-only-deployments-verification
-    - run-tempest
+  tasks:
+    - name: Run Tempest version <= 26.0.0
+      include_role:
+        name: run-tempest-26
+      when:
+        - zuul.branch is defined
+        - zuul.branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+    - name: Run Tempest
+      include_role:
+        name: run-tempest
+      when:
+        - zuul.branch is defined
+        - zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 3b969f2..269999c 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -29,9 +29,17 @@
             (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
             (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
 
+    - name: Run Tempest version <= 26.0.0
+      include_role:
+        name: run-tempest-26
+      when: (zuul.branch is defined and zuul.branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]) or
+            (zuul.override_checkout is defined and zuul.override_checkout in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"])
+
     - name: Run Tempest
       include_role:
         name: run-tempest
+      when: (zuul.branch is defined and zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"] and zuul.override_checkout is not defined) or
+            (zuul.override_checkout is defined and zuul.override_checkout not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"])
 
     - name: Run tempest cleanup dry-run
       include_role:
diff --git a/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml b/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml
new file mode 100644
index 0000000..8069bd3
--- /dev/null
+++ b/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    Adding new config option for volume tests which allows to specify the size
+    a volume will be extended by (if a test does extend a volume or needs
+    a new bigger volume). The option is beneficial in case such tests are
+    executed on systems where the chunk size (the minimum size a volume can be
+    extended by) is other than 1 (originally hardcoded in the tests):
+
+    CONF.volume.volume_size_extend
diff --git a/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml b/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml
new file mode 100644
index 0000000..da58ba3
--- /dev/null
+++ b/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    Add "QoS bandwidth limit rules" APIs to:
+    "tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py"  module.
+
+    * List bandwidth limit rules for QoS policy
+    * Create bandwidth limit rule
+    * Show bandwidth limit rule details
+    * Update bandwidth limit rule
+    * Delete bandwidth limit rule
\ No newline at end of file
diff --git a/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml b/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml
new file mode 100644
index 0000000..b65b164
--- /dev/null
+++ b/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    Added QoS minimum packet rate rule client:
+
+    * create_minimum_packet_rate_rule
+    * update_minimum_packet_rate_rule
+    * show_minimum_packet_rate_rule
+    * list_minimum_packet_rate_rules
+    * delete_minimum_packet_rate_rule
diff --git a/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml b/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml
new file mode 100644
index 0000000..4b31ff8
--- /dev/null
+++ b/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+  - |
+    Application credentials are supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    application credentials testing
+    (``CONF.identity-feature-enabled.application_credentials``)
+    is now enabled by default.
+deprecations:
+  - |
+    Application credentials are supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    application credentials testing
+    (``CONF.identity-feature-enabled.application_credentials``)
+    is now deprecated.
diff --git a/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml b/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml
new file mode 100644
index 0000000..be2df6b
--- /dev/null
+++ b/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+  - |
+    Project tags are supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    project tags testing
+    (``CONF.identity-feature-enabled.project_tags``)
+    is now enabled by default.
+deprecations:
+  - |
+    Project tags are supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    project tags testing
+    (``CONF.identity-feature-enabled.project_tags``)
+    is now deprecated.
diff --git a/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml b/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml
new file mode 100644
index 0000000..36681c7
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Xena development cycle to
+    mark the end of support for EM Train release in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Wallaby
+    * Victoria
+    * Ussuri
+
+    Current development of Tempest is for OpenStack Xena development
+    cycle.
diff --git a/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml b/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml
new file mode 100644
index 0000000..311eca3
--- /dev/null
+++ b/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Add a new client for keystone's unified limits API to create and update limits.
diff --git a/releasenotes/notes/log-resource-client-20e58a295f729902.yaml b/releasenotes/notes/log-resource-client-20e58a295f729902.yaml
new file mode 100644
index 0000000..405fc5f
--- /dev/null
+++ b/releasenotes/notes/log-resource-client-20e58a295f729902.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add a new client that lists, creates, shows information for,
+    and updates neutron log resources.
diff --git a/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml b/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml
new file mode 100644
index 0000000..ac83eaf
--- /dev/null
+++ b/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Neutron's loggable resources API service clients are available in
+    ``tempest/lib/services/network/loggable_resource_client.py`` module.
\ No newline at end of file
diff --git a/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml b/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml
new file mode 100644
index 0000000..9e48510
--- /dev/null
+++ b/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Improve cleanup after Swift testing. Ensures containers are empty before
+    deleting to prevent errors due to delayed execution.
diff --git a/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml b/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml
new file mode 100644
index 0000000..218d8ca
--- /dev/null
+++ b/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml
@@ -0,0 +1,18 @@
+---
+prelude: |
+    This release is to tag Tempest for OpenStack Xena release.
+    This release marks the start of Xena release support in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Xena
+    * Wallaby
+    * Victoria
+    * Ussuri
+
+    Current development of Tempest is for OpenStack Yoga development
+    cycle. Every Tempest commit is also tested against master during
+    the Yoga cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Yoga (or future release)
+    cloud.
+    To be on safe side, use this tag to test the OpenStack Xena release.
+
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index ed0a09f..6a1f8b4 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,9 @@
    :maxdepth: 1
 
    unreleased
+   v29.0.0
+   v28.1.0
+   v28.0.0
    v27.0.0
    v26.1.0
    v26.0.0
diff --git a/releasenotes/source/v28.0.0.rst b/releasenotes/source/v28.0.0.rst
new file mode 100644
index 0000000..19d4218
--- /dev/null
+++ b/releasenotes/source/v28.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v28.0.0 Release Notes
+=====================
+.. release-notes:: 28.0.0 Release Notes
+   :version: 28.0.0
diff --git a/releasenotes/source/v28.1.0.rst b/releasenotes/source/v28.1.0.rst
new file mode 100644
index 0000000..3cc3478
--- /dev/null
+++ b/releasenotes/source/v28.1.0.rst
@@ -0,0 +1,5 @@
+=====================
+v28.1.0 Release Notes
+=====================
+.. release-notes:: 28.1.0 Release Notes
+   :version: 28.1.0
diff --git a/releasenotes/source/v29.0.0.rst b/releasenotes/source/v29.0.0.rst
new file mode 100644
index 0000000..d367a59
--- /dev/null
+++ b/releasenotes/source/v29.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v29.0.0 Release Notes
+=====================
+.. release-notes:: 29.0.0 Release Notes
+   :version: 29.0.0
diff --git a/roles/run-tempest-26/README.rst b/roles/run-tempest-26/README.rst
new file mode 100644
index 0000000..3643edb
--- /dev/null
+++ b/roles/run-tempest-26/README.rst
@@ -0,0 +1,83 @@
+Run Tempest
+
+The result of the tempest run is stored in the `tempest_run_result`
+variable (through the `register` statement).
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: tempest_concurrency
+   :default: 0
+
+   The number of parallel test processes.
+
+.. zuul:rolevar:: tempest_test_regex
+   :default: ''
+
+   A regular expression used to select the tests.
+
+   It works only when used with some specific tox environments
+   ('all', 'all-plugin'.)
+
+   In the following example only api scenario and third party tests
+   will be executed.
+
+       ::
+           vars:
+             tempest_test_regex: (tempest\.(api|scenario|thirdparty)).*$
+
+.. zuul:rolevar:: tempest_test_blacklist
+
+   Specifies a blacklist file to skip tests that are not needed.
+
+   Pass a full path to the file.
+
+.. zuul:rolevar:: tox_envlist
+   :default: smoke
+
+   The Tempest tox environment to run.
+
+.. zuul:rolevar:: tempest_black_regex
+   :default: ''
+
+   A regular expression used to skip the tests.
+
+   It works only when used with some specific tox environments
+   ('all', 'all-plugin'.)
+
+       ::
+           vars:
+             tempest_black_regex: (tempest.api.identity).*$
+
+.. zuul:rolevar:: tox_extra_args
+   :default: ''
+
+   String of extra command line options to pass to tox.
+
+   Here is an example of running tox with --sitepackages option:
+
+       ::
+           vars:
+             tox_extra_args: --sitepackages
+
+.. zuul:rolevar:: tempest_test_timeout
+   :default: ''
+
+   The timeout (in seconds) for each test.
+
+.. zuul:rolevar:: stable_constraints_file
+   :default: ''
+
+   Upper constraints file to be used for stable branch till stable/rocky.
+
+.. zuul:rolevar:: tempest_tox_environment
+   :default: ''
+
+   Environment variable to set for run-tempest task.
+
+   Env variables set in this variable will be combined with some more
+   defaults env variable set at runtime.
diff --git a/roles/run-tempest-26/defaults/main.yaml b/roles/run-tempest-26/defaults/main.yaml
new file mode 100644
index 0000000..cbac76d
--- /dev/null
+++ b/roles/run-tempest-26/defaults/main.yaml
@@ -0,0 +1,12 @@
+devstack_base_dir: /opt/stack
+tempest_test_regex: ''
+tox_envlist: smoke
+tempest_black_regex: ''
+tox_extra_args: ''
+tempest_test_timeout: ''
+stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
+target_branch: "{{ zuul.branch }}"
+tempest_tox_environment: {}
+# NOTE(gmann): external_bridge_mtu shows as undefined for run-tempest role
+# defining default value here to avoid that error.
+external_bridge_mtu: 0
\ No newline at end of file
diff --git a/roles/run-tempest-26/tasks/main.yaml b/roles/run-tempest-26/tasks/main.yaml
new file mode 100644
index 0000000..f846006
--- /dev/null
+++ b/roles/run-tempest-26/tasks/main.yaml
@@ -0,0 +1,73 @@
+# NOTE(andreaf) The number of vcpus is not available on all systems.
+# See https://github.com/ansible/ansible/issues/30688
+# When not available, we fall back to ansible_processor_cores
+- name: Get hw.logicalcpu from sysctl
+  shell: sysctl hw.logicalcpu | cut -d' ' -f2
+  register: sysctl_hw_logicalcpu
+  when: ansible_processor_vcpus is not defined
+
+- name: Number of cores
+  set_fact:
+    num_cores: "{{ansible_processor_vcpus|default(sysctl_hw_logicalcpu.stdout)}}"
+
+- name: Set concurrency for cores == 3 or less
+  set_fact:
+    default_concurrency: "{{ num_cores }}"
+  when: num_cores|int <= 3
+
+- name: Limit max concurrency when more than 3 vcpus are available
+  set_fact:
+    default_concurrency: "{{ num_cores|int // 2 }}"
+  when: num_cores|int > 3
+
+- name: Override target branch
+  set_fact:
+    target_branch: "{{ zuul.override_checkout }}"
+  when: zuul.override_checkout is defined
+
+- name: Use stable branch upper-constraints till stable/stein
+  set_fact:
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+- name: Use Configured upper-constraints for non-master Tempest
+  set_fact:
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) | combine({'TOX_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) }}"
+  when:
+    - devstack_localrc is defined
+    - "'TEMPEST_BRANCH' in devstack_localrc"
+    - "'TEMPEST_VENV_UPPER_CONSTRAINTS' in devstack_localrc"
+    - devstack_localrc['TEMPEST_BRANCH'] != 'master'
+    - devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS'] != 'default'
+
+- name: Set OS_TEST_TIMEOUT if requested
+  set_fact:
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
+  when: tempest_test_timeout != ''
+
+- when:
+    - tempest_test_blacklist is defined
+  block:
+    - name: Check for test blacklist file
+      stat:
+        path: "{{ tempest_test_blacklist }}"
+      register:
+        blacklist_stat
+
+    - name: Build blacklist option
+      set_fact:
+        blacklist_option: "--blacklist-file={{ tempest_test_blacklist|quote }}"
+      when: blacklist_stat.stat.exists
+
+- name: Run Tempest
+  command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+            --concurrency={{tempest_concurrency|default(default_concurrency)}} \
+            --black-regex={{tempest_black_regex|quote}}
+  args:
+    chdir: "{{devstack_base_dir}}/tempest"
+  register: tempest_run_result
+  become: true
+  become_user: tempest
+  environment: "{{ tempest_tox_environment }}"
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index bee4716..922a14c 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -454,6 +454,12 @@
         server = self.servers_client.show_server(server_id)['server']
         self.assert_flavor_equal(new_flavor_id, server['flavor'])
 
+    def reboot_server(self, server_id, type):
+        """Reboot a server and wait for it to be ACTIVE."""
+        self.servers_client.reboot_server(server_id, type=type)
+        waiters.wait_for_server_status(
+            self.servers_client, server_id, 'ACTIVE')
+
     @classmethod
     def delete_volume(cls, volume_id):
         """Deletes the given volume and waits for it to be gone."""
@@ -570,24 +576,33 @@
 
         attachment = self.servers_client.attach_volume(
             server['id'], **attach_kwargs)['volumeAttachment']
-        # On teardown detach the volume and for multiattach volumes wait for
-        # the attachment to be removed. For non-multiattach volumes wait for
-        # the state of the volume to change to available. This is so we don't
-        # error out when trying to delete the volume during teardown.
-        if volume['multiattach']:
-            att = waiters.wait_for_volume_attachment_create(
-                self.volumes_client, volume['id'], server['id'])
-            self.addCleanup(waiters.wait_for_volume_attachment_remove,
-                            self.volumes_client, volume['id'],
-                            att['attachment_id'])
-        else:
-            self.addCleanup(waiters.wait_for_volume_resource_status,
-                            self.volumes_client, volume['id'], 'available')
-            waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                    volume['id'], 'in-use')
-        # Ignore 404s on detach in case the server is deleted or the volume
-        # is already detached.
+
+        # NOTE(lyarwood): During attach we initially wait for the volume
+        # attachment and then check the volume state.
+        waiters.wait_for_volume_attachment_create(
+            self.volumes_client, volume['id'], server['id'])
+        # TODO(lyarwood): Remove the following volume status checks and move to
+        # attachment status checks across all volumes now with the 3.27
+        # microversion somehow.
+        if not volume['multiattach']:
+            waiters.wait_for_volume_resource_status(
+                self.volumes_client, volume['id'], 'in-use')
+
+        # NOTE(lyarwood): On teardown (LIFO) initially wait for the volume
+        # attachment in Nova to be removed. While this technically happens last
+        # we want this to be the first waiter as if it fails we can then dump
+        # the contents of the console log. The final check of the volume state
+        # should be a no-op by this point and is just added for completeness
+        # when detaching non-multiattach volumes.
+        if not volume['multiattach']:
+            self.addCleanup(
+                waiters.wait_for_volume_resource_status, self.volumes_client,
+                volume['id'], 'available')
+        self.addCleanup(
+            waiters.wait_for_volume_attachment_remove_from_server,
+            self.servers_client, server['id'], volume['id'])
         self.addCleanup(self._detach_volume, server, volume)
+
         return attachment
 
     def create_volume_snapshot(self, volume_id, name=None, description=None,
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 671a779..a1f3514 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -109,9 +109,7 @@
                           sg['id'])
 
         # Reboot and add the other security group
-        self.servers_client.reboot_server(server_id, type='HARD')
-        waiters.wait_for_server_status(self.servers_client, server_id,
-                                       'ACTIVE')
+        self.reboot_server(server_id, type='HARD')
         self.servers_client.add_security_group(server_id, name=sg2['name'])
 
         # Check that we are not able to delete the other security
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 48f32a8..c9aec62 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -180,3 +180,56 @@
         if not utils.get_service_list()['volume']:
             msg = "Volume service not enabled."
             raise cls.skipException(msg)
+
+
+class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
+    """Test creating server with FQDN hostname and verifying atrributes
+
+    Starting Wallaby release, Nova sanitizes freeform characters in
+    server hostname with dashes. This test verifies the same.
+    """
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(ServersTestFqdnHostnames, cls).setup_credentials()
+
+    @classmethod
+    def setup_clients(cls):
+        super(ServersTestFqdnHostnames, cls).setup_clients()
+        cls.client = cls.servers_client
+
+    @decorators.idempotent_id('622066d2-39fc-4c09-9eeb-35903c114a0a')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.hostname_fqdn_sanitization,
+        'FQDN hostname sanitization is not supported.')
+    @testtools.skipUnless(CONF.validation.run_validation,
+                          'Instance validation tests are disabled.')
+    def test_create_server_with_fqdn_name(self):
+        """Test to create an instance with FQDN type name scheme"""
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        self.server_name = 'guest-instance-1.domain.com'
+        self.password = data_utils.rand_password()
+        self.accessIPv4 = '2.2.2.2'
+        test_server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='ACTIVE',
+            adminPass=self.password,
+            name=self.server_name,
+            accessIPv4=self.accessIPv4)
+
+        """Verify the hostname within the instance is sanitized
+
+        Freeform characters in the hostname are replaced with dashes
+        """
+        linux_client = remote_client.RemoteClient(
+            self.get_server_ip(test_server, validation_resources),
+            self.ssh_user,
+            self.password,
+            validation_resources['keypair']['private_key'],
+            server=test_server,
+            servers_client=self.client)
+        hostname = linux_client.exec_command("hostname").rstrip()
+        self.assertEqual('guest-instance-1-domain-com', hostname)
diff --git a/tempest/api/compute/servers/test_instance_actions.py b/tempest/api/compute/servers/test_instance_actions.py
index 5ab592a..028da68 100644
--- a/tempest/api/compute/servers/test_instance_actions.py
+++ b/tempest/api/compute/servers/test_instance_actions.py
@@ -37,9 +37,7 @@
     @decorators.idempotent_id('77ca5cc5-9990-45e0-ab98-1de8fead201a')
     def test_list_instance_actions(self):
         """Test listing actions of the provided server"""
-        self.client.reboot_server(self.server['id'], type='HARD')
-        waiters.wait_for_server_status(self.client,
-                                       self.server['id'], 'ACTIVE')
+        self.reboot_server(self.server['id'], type='HARD')
 
         body = (self.client.list_instance_actions(self.server['id'])
                 ['instanceActions'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index deb21c7..152e7e8 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -136,8 +136,7 @@
             # in a server
             linux_client.exec_command("sync")
 
-        self.client.reboot_server(self.server_id, type=reboot_type)
-        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+        self.reboot_server(self.server_id, type=reboot_type)
 
         if CONF.validation.run_validation:
             # Log in and verify the boot time has changed
@@ -607,8 +606,7 @@
         # log file is truncated and we cannot get any console log through
         # "console-log" API.
         # The detail is https://bugs.launchpad.net/nova/+bug/1251920
-        self.client.reboot_server(self.server_id, type='HARD')
-        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+        self.reboot_server(self.server_id, type='HARD')
         self.wait_for(self._get_output)
 
     @decorators.idempotent_id('89104062-69d8-4b19-a71b-f47b7af093d7')
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 7251e36..4c7c234 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -455,6 +455,8 @@
 
     @utils.services('image')
     @decorators.idempotent_id('885ac48a-2d7a-40c5-ae8b-1993882d724c')
+    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
+                          'Snapshotting is not available.')
     def test_snapshot_volume_backed_multiattach(self):
         """Boots a server from a multiattach volume and snapshots the server.
 
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index 6425ea9..dc6dd4a 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -77,6 +77,8 @@
         time.sleep(1)
         self.non_admin_users_client.auth_provider.set_auth()
 
+    @testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
+                          'Security compliance not available.')
     @decorators.idempotent_id('ad71bd23-12ad-426b-bb8b-195d2b635f27')
     @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
                       'Skipped because environment has an '
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 190d9e3..1ce9f47 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -45,11 +45,17 @@
         super(QuotasNegativeTest, self).setUp()
         name = data_utils.rand_name('test_project_')
         description = data_utils.rand_name('desc_')
-        self.project = identity.identity_utils(self.os_admin).create_project(
+        self.creds_client = identity.identity_utils(self.os_admin)
+        self.project = self.creds_client.create_project(
             name=name, description=description)
         self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
                         self.project['id'])
 
+    def tearDown(self):
+        super(QuotasNegativeTest, self).tearDown()
+        self.credentials_provider.cleanup_default_secgroup(
+            self.os_admin.security_groups_client, self.project['id'])
+
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
     def test_network_quota_exceeding(self):
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index b6bf369..47a8590 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -84,6 +84,8 @@
         cls.network_versions_client = cls.os_primary.network_versions_client
         cls.service_providers_client = cls.os_primary.service_providers_client
         cls.tags_client = cls.os_primary.tags_client
+        cls.log_resource_client = cls.os_primary.log_resource_client
+        cls.loggable_resource_client = cls.os_primary.loggable_resource_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 63078cd..190f7e0 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -51,7 +51,8 @@
 
     def _create_subnet(self, network, gateway='',
                        cidr=None, mask_bits=None, **kwargs):
-        subnet = self.create_subnet(network, gateway, cidr, mask_bits)
+        subnet = self.create_subnet(
+            network, gateway, cidr, mask_bits, **kwargs)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.subnets_client.delete_subnet, subnet['id'])
         return subnet
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 478a834..8d8039b 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
 from tempest.common import custom_matchers
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -27,11 +29,6 @@
 
     The containers should be visible from the container_client given.
     Will not throw any error if the containers don't exist.
-    Will not check that object and container deletions succeed.
-    After delete all the objects from a container, it will wait 2
-    seconds before delete the container itself, in order to deployments
-    using HA proxy sync the deletion properly, otherwise, the container
-    might fail to be deleted because it's not empty.
 
     :param containers: List of containers(or string of a container)
                        to be deleted
@@ -119,12 +116,20 @@
             object_name = data_utils.rand_name(name='TestObject')
         if data is None:
             data = data_utils.random_bytes()
-        cls.object_client.create_object(container_name,
-                                        object_name,
-                                        data,
-                                        metadata=metadata)
 
-        return object_name, data
+        err = Exception()
+        for _ in range(5):
+            try:
+                cls.object_client.create_object(container_name,
+                                                object_name,
+                                                data,
+                                                metadata=metadata)
+                return object_name, data
+            # after bucket creation we might see Conflict
+            except lib_exc.Conflict as e:
+                err = e
+                time.sleep(2)
+        raise err
 
     @classmethod
     def delete_containers(cls, container_client=None, object_client=None):
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index c8731fe..0259373 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -31,9 +31,10 @@
         super(ObjectTestACLs, self).setUp()
         self.container_name = self.create_container()
 
-    def tearDown(self):
-        self.delete_containers()
-        super(ObjectTestACLs, self).tearDown()
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ObjectTestACLs, cls).resource_cleanup()
 
     @decorators.idempotent_id('a3270f3f-7640-4944-8448-c7ea783ea5b6')
     def test_read_object_with_rights(self):
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 73d7f27..85e6ddb 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -42,9 +42,10 @@
         self.container_name = data_utils.rand_name(name='TestContainer')
         self.container_client.update_container(self.container_name)
 
-    def tearDown(self):
-        self.delete_containers([self.container_name])
-        super(ObjectACLsNegativeTest, self).tearDown()
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ObjectACLsNegativeTest, cls).resource_cleanup()
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('af587587-0c24-4e15-9822-8352ce711013')
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index fcd9a7c..7977a7a 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -44,10 +44,10 @@
         self.container_client.create_update_or_delete_container_metadata(
             self.container_name, create_update_metadata=metadata)
 
-    def tearDown(self):
-        """Cleans the container of any object after each test."""
-        self.delete_containers()
-        super(ContainerQuotasTest, self).tearDown()
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ContainerQuotasTest, cls).resource_cleanup()
 
     @decorators.idempotent_id('9a0fb034-86af-4df0-86fa-f8bd7db21ae0')
     @utils.requires_ext(extension='container_quotas', service='object')
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index 7ad6f6f..085b8ab 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -21,9 +21,10 @@
 class ContainerTest(base.BaseObjectTest):
     """Test containers"""
 
-    def tearDown(self):
-        self.delete_containers()
-        super(ContainerTest, self).tearDown()
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ContainerTest, cls).resource_cleanup()
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('92139d73-7819-4db1-85f8-3f2f22a8d91f')
diff --git a/tempest/api/object_storage/test_container_services_negative.py b/tempest/api/object_storage/test_container_services_negative.py
index 31c33db..51c711f 100644
--- a/tempest/api/object_storage/test_container_services_negative.py
+++ b/tempest/api/object_storage/test_container_services_negative.py
@@ -36,6 +36,11 @@
             body = cls.capabilities_client.list_capabilities()
             cls.constraints = body['swift']
 
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ContainerNegativeTest, cls).resource_cleanup()
+
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('30686921-4bed-4764-a038-40d741ed4e78')
     @testtools.skipUnless(
@@ -167,11 +172,7 @@
         # create a container and an object within it
         # attempt to delete a container that isn't empty.
         container_name = self.create_container()
-        self.addCleanup(self.container_client.delete_container,
-                        container_name)
         object_name, _ = self.create_object(container_name)
-        self.addCleanup(self.object_client.delete_object,
-                        container_name, object_name)
 
         ex = self.assertRaises(exceptions.Conflict,
                                self.container_client.delete_container,
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 276b826..6b1f849 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -58,6 +58,7 @@
 
         # Default container-server config only allows localhost
         cls.local_ip = '127.0.0.1'
+        cls.local_ip_v6 = '[::1]'
 
         # Must be configure according to container-sync interval
         container_sync_timeout = CONF.object_storage.container_sync_timeout
@@ -134,11 +135,18 @@
         """Test container synchronization"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
-            client_proxy_ip = \
-                urlparse.urlparse(cont_client.base_url).netloc.split(':')[0]
-            client_base_url = \
-                cont_client.base_url.replace(client_proxy_ip,
-                                             self.local_ip)
+            # use rsplit with a maxsplit of 1 to ensure IPv6 addresses are
+            # handled properly as well
+            client_proxy_ip = urlparse.urlparse(
+                cont_client.base_url).netloc.rsplit(':', 1)[0]
+            if client_proxy_ip.startswith("["):  # lazy check
+                client_base_url = \
+                    cont_client.base_url.replace(client_proxy_ip,
+                                                 self.local_ip_v6)
+            else:
+                client_base_url = \
+                    cont_client.base_url.replace(client_proxy_ip,
+                                                 self.local_ip)
             headers = {'X-Container-Sync-Key': 'sync_key',
                        'X-Container-Sync-To': "%s/%s" %
                        (client_base_url, str(cont))}
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 93f6fdb..2823185 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -182,7 +182,7 @@
         self.assertEqual(data, body)
 
     @decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
-    @decorators.skip_because(bug='1905432')
+    @decorators.unstable_test(bug='1905432')
     def test_create_object_with_transfer_encoding(self):
         """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index 0c84357..22d12ce 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -18,7 +18,6 @@
 from tempest.api.object_storage import base
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
 # Each segment, except for the final one, must be at least 1 megabyte
@@ -34,11 +33,7 @@
         self.objects = []
 
     def tearDown(self):
-        for obj in self.objects:
-            test_utils.call_and_ignore_notfound_exc(
-                self.object_client.delete_object,
-                self.container_name, obj)
-        self.container_client.delete_container(self.container_name)
+        self.delete_containers()
         super(ObjectSloTest, self).tearDown()
 
     def _create_object(self, container_name, object_name, data, params=None):
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index 659e2c4..ddfc78a 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -64,8 +64,8 @@
 class GroupSnapshotsTest(BaseGroupSnapshotsTest):
     """Test group snapshot"""
 
-    min_microversion = '3.14'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.14'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
     def test_group_snapshot_create_show_list_delete(self):
@@ -252,8 +252,8 @@
 class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
     """Test group snapshot with volume microversion greater than 3.18"""
 
-    min_microversion = '3.19'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.19'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
     @decorators.skip_because(bug='1770179')
diff --git a/tempest/api/volume/admin/test_group_type_specs.py b/tempest/api/volume/admin/test_group_type_specs.py
index 5c5913e..63c3546 100644
--- a/tempest/api/volume/admin/test_group_type_specs.py
+++ b/tempest/api/volume/admin/test_group_type_specs.py
@@ -21,8 +21,8 @@
 class GroupTypeSpecsTest(base.BaseVolumeAdminTest):
     """Test group type specs"""
 
-    min_microversion = '3.11'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.11'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('bb4e30d0-de6e-4f4d-866c-dcc48d023b4e')
     def test_group_type_specs_create_show_update_list_delete(self):
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index a7a5d6f..97455f3 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -21,8 +21,8 @@
 class GroupTypesTest(base.BaseVolumeAdminTest):
     """Test group types"""
 
-    min_microversion = '3.11'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.11'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
     def test_group_type_create_list_update_show(self):
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index 747a194..f16e4d2 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -25,8 +25,8 @@
 class GroupsTest(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.12"""
 
-    min_microversion = '3.13'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.13'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('4b111d28-b73d-4908-9bd2-03dc2992e4d4')
     def test_group_create_show_list_delete(self):
@@ -155,8 +155,8 @@
 class GroupsV314Test(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.13"""
 
-    min_microversion = '3.14'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.14'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
     def test_create_group_from_group(self):
@@ -192,8 +192,8 @@
 class GroupsV320Test(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.19"""
 
-    min_microversion = '3.20'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.20'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('b20c696b-0cbc-49a5-8b3a-b1fb9338f45c')
     def test_reset_group_status(self):
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
index 768c129..00b7f3a 100644
--- a/tempest/api/volume/admin/test_user_messages.py
+++ b/tempest/api/volume/admin/test_user_messages.py
@@ -24,8 +24,8 @@
 class UserMessagesTest(base.BaseVolumeAdminTest):
     """Test volume messages with microversion greater than 3.2"""
 
-    min_microversion = '3.3'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.3'
+    volume_max_microversion = 'latest'
 
     def _create_user_message(self):
         """Trigger a 'no valid host' situation to generate a message."""
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 5c14d52..4a3f494 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -13,6 +13,7 @@
 import abc
 
 from oslo_log import log as logging
+import testtools
 
 from tempest.api.volume import base
 from tempest.common import waiters
@@ -146,6 +147,8 @@
         self._retype_volume(src_vol, migration_policy='on-demand')
 
     @decorators.idempotent_id('d0d9554f-e7a5-4104-8973-f35b27ccb60d')
+    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+                          "Cinder volume snapshots are disabled.")
     def test_volume_from_snapshot_retype_with_migration(self):
         """Test volume created from snapshot retype with migration
 
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 6b2a278..70a62ff 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -77,6 +77,21 @@
             extra_spec)
 
     @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('474090d2-0824-eb3b-9335-f506b4aa49d8')
+    def test_update_nonexistent_type_id(self):
+        """Test update volume type extra specs for non existent volume type
+
+        Update volume type extra specs for non existent volume type should
+        fail.
+        """
+        spec_key = "spec1"
+        extra_spec = {spec_key: "val5"}
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.admin_volume_types_client.update_volume_type_extra_specs,
+            data_utils.rand_uuid(), spec_key, extra_spec)
+
+    @decorators.attr(type=['negative'])
     @decorators.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
     def test_create_nonexistent_type_id(self):
         """Test creating volume type extra specs for non existent volume type
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 174cf9e..f37c427 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -54,3 +54,28 @@
         volume_type = self.create_volume_type(**params)
         self.assertRaises(lib_exc.NotFound,
                           self.create_volume, volume_type=volume_type['id'])
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('a5924b5f-b6c1-49ba-994c-b4af55d26e52')
+    def test_create_volume_type_encryption_nonexistent_type_id(self):
+        """Test create encryption with nonexistent type id will fail"""
+        create_kwargs = {
+            'type_id': data_utils.rand_uuid(),
+            'provider': 'LuksEncryptor',
+            'key_size': 256,
+            'cipher': 'aes-xts-plain64',
+            'control_location': 'front-end'
+            }
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.create_encryption_type, **create_kwargs)
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('969b10c7-3d77-4e1b-a4f2-2d265980f7e5')
+    def test_create_with_repeated_name(self):
+        """Test creating volume type with a repeated name will fail"""
+        volume_type_name = self.create_volume_type()['name']
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.admin_volume_types_client.create_volume_type,
+            name=volume_type_name)
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index d5c6fd9..6e34dd6 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -13,10 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.api.volume import api_microversion_fixture
 from tempest.common import compute
 from tempest.common import waiters
 from tempest import config
+from tempest.lib.common import api_microversion_fixture
 from tempest.lib.common import api_version_utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
@@ -43,7 +43,7 @@
             raise cls.skipException(skip_msg)
 
         api_version_utils.check_skip_with_microversion(
-            cls.min_microversion, cls.max_microversion,
+            cls.volume_min_microversion, cls.volume_max_microversion,
             CONF.volume.min_microversion, CONF.volume.max_microversion)
 
     @classmethod
@@ -78,15 +78,20 @@
     def setUp(self):
         super(BaseVolumeTest, self).setUp()
         self.useFixture(api_microversion_fixture.APIMicroversionFixture(
-            self.request_microversion))
+            compute_microversion=self.compute_request_microversion,
+            volume_microversion=self.volume_request_microversion))
 
     @classmethod
     def resource_setup(cls):
         super(BaseVolumeTest, cls).resource_setup()
-        cls.request_microversion = (
+        cls.volume_request_microversion = (
+            api_version_utils.select_request_microversion(
+                cls.volume_min_microversion,
+                CONF.volume.min_microversion))
+        cls.compute_request_microversion = (
             api_version_utils.select_request_microversion(
                 cls.min_microversion,
-                CONF.volume.min_microversion))
+                CONF.compute.min_microversion))
 
         cls.image_ref = CONF.compute.image_ref
         cls.flavor_ref = CONF.compute.flavor_ref
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 9600aa9..f1dec06 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -109,8 +109,8 @@
 class VolumesTransfersV355Test(VolumesTransfersTest):
     """Test volume transfer for the "new" Transfers API mv 3.55"""
 
-    min_microversion = '3.55'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.55'
+    volume_max_microversion = 'latest'
 
     credentials = ['primary', 'alt', 'admin']
 
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index fff6a44..138d120 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -164,8 +164,8 @@
 class VolumesBackupsV39Test(base.BaseVolumeTest):
     """Test volumes backup with volume microversion greater than 3.8"""
 
-    min_microversion = '3.9'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.9'
+    volume_max_microversion = 'latest'
 
     @classmethod
     def skip_checks(cls):
diff --git a/tempest/api/volume/test_volumes_clone.py b/tempest/api/volume/test_volumes_clone.py
index eb54426..9ca1c5e 100644
--- a/tempest/api/volume/test_volumes_clone.py
+++ b/tempest/api/volume/test_volumes_clone.py
@@ -49,13 +49,14 @@
         # Creates a volume from another volume passing a size different from
         # the source volume.
         src_size = CONF.volume.volume_size
+        extend_size = CONF.volume.volume_size_extend
 
         src_vol = self.create_volume(size=src_size)
         # Destination volume bigger than source
         dst_vol = self.create_volume(source_volid=src_vol['id'],
-                                     size=src_size + 1)
+                                     size=src_size + extend_size)
 
-        self._verify_volume_clone(src_vol, dst_vol, extra_size=1)
+        self._verify_volume_clone(src_vol, dst_vol, extra_size=extend_size)
 
     @decorators.idempotent_id('cbbcd7c6-5a6c-481a-97ac-ca55ab715d16')
     @utils.services('image')
diff --git a/tempest/api/volume/test_volumes_clone_negative.py b/tempest/api/volume/test_volumes_clone_negative.py
index 4bfb166..115465c 100644
--- a/tempest/api/volume/test_volumes_clone_negative.py
+++ b/tempest/api/volume/test_volumes_clone_negative.py
@@ -36,11 +36,11 @@
         """Test cloning a volume with decreasing size will fail"""
         # Creates a volume from another volume passing a size different from
         # the source volume.
-        src_size = CONF.volume.volume_size + 1
+        src_size = CONF.volume.volume_size + CONF.volume.volume_size_extend
         src_vol = self.create_volume(size=src_size)
 
         # Destination volume smaller than source
         self.assertRaises(exceptions.BadRequest,
                           self.volumes_client.create_volume,
-                          size=src_size - 1,
+                          size=src_size - CONF.volume.volume_size_extend,
                           source_volid=src_vol['id'])
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index d9790f3..fcbc982 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -80,7 +80,7 @@
     # NOTE(mriedem): The minimum required volume API version is 3.42 and the
     # minimum required compute API microversion is 2.51, but the compute call
     # is implicit - Cinder calls Nova at that microversion, Tempest does not.
-    min_microversion = '3.42'
+    volume_min_microversion = '3.42'
 
     def _find_extend_volume_instance_action(self, server_id):
         actions = self.servers_client.list_instance_actions(
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 28e41bf..2009970 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -143,8 +143,8 @@
 class VolumesSummaryTest(base.BaseVolumeTest):
     """Test volume summary"""
 
-    min_microversion = '3.12'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.12'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('c4f2431e-4920-4736-9e00-4040386b6feb')
     def test_show_volume_summary(self):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 563820f..d9b8430 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -45,7 +45,7 @@
             container_format=CONF.image.container_formats[0],
             disk_format=CONF.image.disk_formats[0],
             visibility='private',
-            min_disk=CONF.volume.volume_size + 1)
+            min_disk=CONF.volume.volume_size + CONF.volume.volume_size_extend)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.images_client.delete_image, image['id'])
 
@@ -223,7 +223,7 @@
     @decorators.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
     def test_volume_extend_with_nonexistent_volume_id(self):
         """Test extending non existent volume should fail"""
-        extend_size = self.volume['size'] + 1
+        extend_size = self.volume['size'] + CONF.volume.volume_size_extend
         self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
                           data_utils.rand_uuid(), new_size=extend_size)
 
@@ -231,7 +231,7 @@
     @decorators.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
     def test_volume_extend_without_passing_volume_id(self):
         """Test extending volume without passing volume id should fail"""
-        extend_size = self.volume['size'] + 1
+        extend_size = self.volume['size'] + CONF.volume.volume_size_extend
         self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
                           None, new_size=extend_size)
 
diff --git a/tempest/clients.py b/tempest/clients.py
index 6080f01..6a25997 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -71,8 +71,11 @@
         self.tags_client = self.network.TagsClient()
         self.qos_client = self.network.QosClient()
         self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
+        self.qos_limit_bw_client = self.network.QosLimitBandwidthRulesClient()
         self.segments_client = self.network.SegmentsClient()
         self.trunks_client = self.network.TrunksClient()
+        self.log_resource_client = self.network.LogResourceClient()
+        self.loggable_resource_client = self.network.LoggableResourceClient()
 
     def _set_image_clients(self):
         if CONF.service_available.glance:
@@ -221,6 +224,8 @@
             self.identity_v3.ApplicationCredentialsClient(**params_v3)
         self.access_rules_client = \
             self.identity_v3.AccessRulesClient(**params_v3)
+        self.identity_limits_client = \
+            self.identity_v3.LimitsClient(**params_v3)
 
         # Token clients do not use the catalog. They only need default_params.
         # They read auth_url, so they should only be set if the corresponding
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index b68a879..5d6e129 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -108,7 +108,7 @@
         LOG.debug('(get_nic_name_by_ip) Command result: %s', nic)
         return nic.strip().strip(":").split('@')[0].lower()
 
-    def get_dns_servers(self):
+    def _get_dns_servers(self):
         cmd = 'cat /etc/resolv.conf'
         resolve_file = self.exec_command(cmd).strip().split('\n')
         entries = (l.split() for l in resolve_file)
@@ -116,6 +116,19 @@
                        if len(l) and l[0] == 'nameserver']
         return dns_servers
 
+    def get_dns_servers(self, timeout=5):
+        start_time = int(time.time())
+        dns_servers = []
+        while True:
+            dns_servers = self._get_dns_servers()
+            if dns_servers:
+                break
+            LOG.debug("DNS Servers list empty.")
+            if int(time.time()) - start_time >= timeout:
+                LOG.debug("DNS Servers list empty after %s.", timeout)
+                break
+        return dns_servers
+
     def _renew_lease_udhcpc(self, fixed_ip=None):
         """Renews DHCP lease via udhcpc client. """
         file_path = '/var/run/udhcpc.'
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 3750b11..f6a4555 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -356,23 +356,36 @@
     This waiter checks the compute API if the volume attachment is removed.
     """
     start = int(time.time())
-    volumes = client.list_volume_attachments(server_id)['volumeAttachments']
+
+    try:
+        volumes = client.list_volume_attachments(
+            server_id)['volumeAttachments']
+    except lib_exc.NotFound:
+        # Ignore 404s on detach in case the server is deleted or the volume
+        # is already detached.
+        return
 
     while any(volume for volume in volumes if volume['volumeId'] == volume_id):
         time.sleep(client.build_interval)
 
         timed_out = int(time.time()) - start >= client.build_timeout
         if timed_out:
+            console_output = client.get_console_output(server_id)['output']
+            LOG.debug('Console output for %s\nbody=\n%s',
+                      server_id, console_output)
             message = ('Volume %s failed to detach from server %s within '
                        'the required time (%s s) from the compute API '
                        'perspective' %
                        (volume_id, server_id, client.build_timeout))
             raise lib_exc.TimeoutException(message)
-
-        volumes = client.list_volume_attachments(server_id)[
-            'volumeAttachments']
-
-    return volumes
+        try:
+            volumes = client.list_volume_attachments(
+                server_id)['volumeAttachments']
+        except lib_exc.NotFound:
+            # Ignore 404s on detach in case the server is deleted or the volume
+            # is already detached.
+            return
+    return
 
 
 def wait_for_volume_migration(client, volume_id, new_host):
diff --git a/tempest/config.py b/tempest/config.py
index c409db6..662a249 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -259,14 +259,18 @@
                 help='Does the environment have the security compliance '
                      'settings enabled?'),
     cfg.BoolOpt('project_tags',
-                default=False,
-                help='Is the project tags identity v3 API available?'),
-    # Application credentials is a default feature in Queens. This config
-    # option can removed once Pike is EOL.
+                default=True,
+                help='Is the project tags identity v3 API available?',
+                deprecated_for_removal=True,
+                deprecated_reason='Project tags API is a default feature '
+                                  'since Queens'),
     cfg.BoolOpt('application_credentials',
-                default=False,
+                default=True,
                 help='Does the environment have application credentials '
-                     'enabled?'),
+                     'enabled?',
+                deprecated_for_removal=True,
+                deprecated_reason='Application credentials is a default '
+                                  'feature since Queens'),
     # Access rules for application credentials is a default feature in Train.
     # This config option can removed once Stein is EOL.
     cfg.BoolOpt('access_rules',
@@ -437,6 +441,15 @@
     cfg.BoolOpt('disk_config',
                 default=True,
                 help="If false, skip disk config tests"),
+    # TODO(pkesav): Make it True by default once wallaby
+    # is oldest supported stable for Tempest.
+    cfg.BoolOpt('hostname_fqdn_sanitization',
+                default=False,
+                help="If false, skip fqdn instance sanitization tests. "
+                     "Nova started sanitizing the instance name by replacing "
+                     "the '.' with '-' to comply with fqdn hostname. Nova "
+                     "changed that in Wallaby cycle, if your cloud is older "
+                     "than wallaby then you can keep/make it False."),
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help='A list of enabled compute extensions with a special '
@@ -1000,6 +1013,11 @@
     cfg.IntOpt('volume_size',
                default=1,
                help='Default size in GB for volumes created by volumes tests'),
+    cfg.IntOpt('volume_size_extend',
+               default=1,
+               help="Size in GB a volume is extended by - if a test "
+                    "extends a volume, the size of the new volume will be "
+                    "volume_size + volume_size_extend."),
     cfg.ListOpt('manage_volume_ref',
                 default=['source-name', 'volume-%s'],
                 help="A reference to existing volume for volume manage. "
diff --git a/tempest/lib/common/cred_provider.py b/tempest/lib/common/cred_provider.py
index 069172a..2da206f 100644
--- a/tempest/lib/common/cred_provider.py
+++ b/tempest/lib/common/cred_provider.py
@@ -13,11 +13,13 @@
 #    limitations under the License.
 
 import abc
-
+from oslo_log import log as logging
 
 from tempest.lib import auth
 from tempest.lib import exceptions
 
+LOG = logging.getLogger(__name__)
+
 
 class CredentialProvider(object, metaclass=abc.ABCMeta):
     def __init__(self, identity_version, name=None,
@@ -125,6 +127,18 @@
     def is_role_available(self, role):
         return
 
+    def cleanup_default_secgroup(self, security_group_client, tenant):
+        resp_body = security_group_client.list_security_groups(
+            tenant_id=tenant,
+            name="default")
+        secgroups_to_delete = resp_body['security_groups']
+        for secgroup in secgroups_to_delete:
+            try:
+                security_group_client.delete_security_group(secgroup['id'])
+            except exceptions.NotFound:
+                LOG.warning('Security group %s, id %s not found for clean-up',
+                            secgroup['name'], secgroup['id'])
+
 
 class TestResources(object):
     """Readonly Credentials, with network resources added."""
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index d86522a..be8c0e8 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -254,8 +254,8 @@
                     user, role, domain)
             elif scope == 'system':
                 self.creds_client.assign_user_role_on_system(user, role)
-        LOG.info("Roles assigned to the user %s are: %s",
-                 user['id'], roles_to_assign)
+        LOG.info("Dynamic test user %s is created with scope %s and roles: %s",
+                 user['id'], scope, roles_to_assign)
 
         creds = self.creds_client.get_credentials(**cred_params)
         return cred_provider.TestResources(creds)
@@ -407,13 +407,23 @@
             # Maintained until tests are ported
             LOG.info("Acquired dynamic creds:\n"
                      " credentials: %s", credentials)
-            if (self.neutron_available and self.create_networks):
-                network, subnet, router = self._create_network_resources(
-                    credentials.tenant_id)
-                credentials.set_resources(network=network, subnet=subnet,
-                                          router=router)
-                LOG.info("Created isolated network resources for:\n"
-                         " credentials: %s", credentials)
+            # NOTE(gmann): For 'domain' and 'system' scoped tokens, there is
+            # no project_id, so we skip the network creation for both
+            # scopes. How these scoped tokens can create the network, Nova
+            # server or other project-mapped resources is one of the open
+            # questions discussed a lot in the Xena cycle PTG. Once we sort
+            # that out, we can update the network creation here if needed.
+            if (not scope or scope == 'project'):
+                if (self.neutron_available and self.create_networks):
+                    network, subnet, router = self._create_network_resources(
+                        credentials.tenant_id)
+                    credentials.set_resources(network=network, subnet=subnet,
+                                              router=router)
+                    LOG.info("Created isolated network resources for:\n"
+                             " credentials: %s", credentials)
+            else:
+                LOG.info("Network resources are not created for scope: %s",
+                         scope)
         return credentials
 
     # TODO(gmann): Remove this method in favor of get_project_member_creds()
@@ -508,18 +518,6 @@
             LOG.warning('network with name: %s not found for delete',
                         network_name)
 
-    def _cleanup_default_secgroup(self, tenant):
-        nsg_client = self.security_groups_admin_client
-        resp_body = nsg_client.list_security_groups(tenant_id=tenant,
-                                                    name="default")
-        secgroups_to_delete = resp_body['security_groups']
-        for secgroup in secgroups_to_delete:
-            try:
-                nsg_client.delete_security_group(secgroup['id'])
-            except lib_exc.NotFound:
-                LOG.warning('Security group %s, id %s not found for clean-up',
-                            secgroup['name'], secgroup['id'])
-
     def _clear_isolated_net_resources(self):
         client = self.routers_admin_client
         for cred in self._creds:
@@ -562,13 +560,14 @@
                 LOG.warning("user with name: %s not found for delete",
                             creds.username)
             # NOTE(zhufl): Only when neutron's security_group ext is
-            # enabled, _cleanup_default_secgroup will not raise error. But
+            # enabled, cleanup_default_secgroup will not raise an error. But
             # here cannot use test_utils.is_extension_enabled for it will cause
             # "circular dependency". So here just use try...except to
             # ensure tenant deletion without big changes.
             try:
                 if self.neutron_available:
-                    self._cleanup_default_secgroup(creds.tenant_id)
+                    self.cleanup_default_secgroup(
+                        self.security_groups_admin_client, creds.tenant_id)
             except lib_exc.NotFound:
                 LOG.warning("failed to cleanup tenant %s's secgroup",
                             creds.tenant_name)
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index 0ac757d..1618175 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -31,7 +31,7 @@
 @FORMAT_CHECKER.checks('iso8601-date-time')
 def _validate_datetime_format(instance):
     try:
-        if isinstance(instance, jsonschema.compat.str_types):
+        if instance is not None:
             timeutils.parse_isotime(instance)
     except ValueError:
         return False
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 573d64e..3f735f5 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -14,7 +14,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import collections
+from collections import abc
 import email.utils
 import re
 import time
@@ -884,7 +884,7 @@
                                                     resp=resp)
 
     def is_absolute_limit(self, resp, resp_body):
-        if (not isinstance(resp_body, collections.Mapping) or
+        if (not isinstance(resp_body, abc.Mapping) or
                 'retry-after' not in resp):
             return True
         return 'exceed' in resp_body.get('message', 'blabla')
diff --git a/tempest/lib/services/identity/v3/__init__.py b/tempest/lib/services/identity/v3/__init__.py
index 86fa991..af09fb1 100644
--- a/tempest/lib/services/identity/v3/__init__.py
+++ b/tempest/lib/services/identity/v3/__init__.py
@@ -32,6 +32,7 @@
 from tempest.lib.services.identity.v3.identity_client import IdentityClient
 from tempest.lib.services.identity.v3.inherited_roles_client import \
     InheritedRolesClient
+from tempest.lib.services.identity.v3.limits_client import LimitsClient
 from tempest.lib.services.identity.v3.oauth_consumers_client import \
     OAUTHConsumerClient
 from tempest.lib.services.identity.v3.oauth_token_client import \
@@ -55,7 +56,8 @@
            'DomainConfigurationClient', 'EndPointGroupsClient',
            'EndPointsClient', 'EndPointsFilterClient',
            'GroupsClient', 'IdentityClient', 'InheritedRolesClient',
-           'OAUTHConsumerClient', 'OAUTHTokenClient', 'PoliciesClient',
-           'ProjectsClient', 'ProjectTagsClient', 'RegionsClient',
-           'RoleAssignmentsClient', 'RolesClient', 'ServicesClient',
-           'V3TokenClient', 'TrustsClient', 'UsersClient', 'VersionsClient']
+           'LimitsClient', 'OAUTHConsumerClient', 'OAUTHTokenClient',
+           'PoliciesClient', 'ProjectsClient', 'ProjectTagsClient',
+           'RegionsClient', 'RoleAssignmentsClient', 'RolesClient',
+           'ServicesClient', 'V3TokenClient', 'TrustsClient', 'UsersClient',
+           'VersionsClient']
diff --git a/tempest/lib/services/identity/v3/limits_client.py b/tempest/lib/services/identity/v3/limits_client.py
new file mode 100644
index 0000000..26d04bc
--- /dev/null
+++ b/tempest/lib/services/identity/v3/limits_client.py
@@ -0,0 +1,57 @@
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class LimitsClient(rest_client.RestClient):
+    api_version = "v3"
+
+    def get_registered_limits(self):
+        """Lists all registered limits."""
+        resp, body = self.get('registered_limits')
+        self.expected_success(200, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
+
+    def create_limit(self, region_id, service_id, project_id, resource_name,
+                     default_limit, description=None, domain_id=None):
+        """Creates a limit in keystone."""
+        limit = {
+            'service_id': service_id,
+            'project_id': project_id,
+            'resource_name': resource_name,
+            'resource_limit': default_limit,
+            'region_id': region_id,
+            'description': description or '%s limit for %s' % (
+                resource_name, project_id),
+        }
+        if domain_id:
+            limit['domain_id'] = domain_id
+        post_body = json.dumps({'limits': [limit]})
+        resp, body = self.post('limits', post_body)
+        self.expected_success(201, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
+
+    def update_limit(self, limit_id, resource_limit, description=None):
+        """Updates a limit in keystone by id."""
+
+        limit = {'resource_limit': resource_limit}
+        if description:
+            limit['description'] = description
+        patch_body = json.dumps({'limit': limit})
+        resp, body = self.patch('limits/%s' % limit_id, patch_body)
+        self.expected_success(200, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index 7e57499..a0e6313 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -15,6 +15,9 @@
 from tempest.lib.services.network.agents_client import AgentsClient
 from tempest.lib.services.network.extensions_client import ExtensionsClient
 from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
+from tempest.lib.services.network.log_resource_client import LogResourceClient
+from tempest.lib.services.network.loggable_resource_client import \
+    LoggableResourceClient
 from tempest.lib.services.network.metering_label_rules_client import \
     MeteringLabelRulesClient
 from tempest.lib.services.network.metering_labels_client import \
@@ -22,6 +25,8 @@
 from tempest.lib.services.network.networks_client import NetworksClient
 from tempest.lib.services.network.ports_client import PortsClient
 from tempest.lib.services.network.qos_client import QosClient
+from tempest.lib.services.network.qos_limit_bandwidth_rules_client import \
+    QosLimitBandwidthRulesClient
 from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
     QosMinimumBandwidthRulesClient
 from tempest.lib.services.network.quotas_client import QuotasClient
@@ -42,7 +47,9 @@
 __all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
            'MeteringLabelRulesClient', 'MeteringLabelsClient',
            'NetworksClient', 'NetworkVersionsClient', 'PortsClient',
-           'QosClient', 'QosMinimumBandwidthRulesClient', 'QuotasClient',
-           'RoutersClient', 'SecurityGroupRulesClient', 'SecurityGroupsClient',
+           'QosClient', 'QosMinimumBandwidthRulesClient',
+           'QosLimitBandwidthRulesClient', 'QuotasClient', 'RoutersClient',
+           'SecurityGroupRulesClient', 'SecurityGroupsClient',
            'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
-           'SubnetsClient', 'TagsClient', 'TrunksClient']
+           'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
+           'LoggableResourceClient']
diff --git a/tempest/lib/services/network/log_resource_client.py b/tempest/lib/services/network/log_resource_client.py
new file mode 100644
index 0000000..727b138
--- /dev/null
+++ b/tempest/lib/services/network/log_resource_client.py
@@ -0,0 +1,74 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class LogResourceClient(base.BaseNetworkClient):
+
+    def create_log(self, **kwargs):
+        """Creates a log resource.
+
+        Creates a log resource by using the configuration that you define in
+        the request object.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-log
+        """
+        uri = '/log/logs/'
+        post_data = {'log': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_log(self, log_id, **kwargs):
+        """Updates a log resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-log
+        """
+        uri = '/log/logs/%s' % log_id
+        post_data = {'log': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_log(self, log_id, **fields):
+        """Shows details for a log id.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-log
+        """
+        uri = '/log/logs/%s' % log_id
+        return self.show_resource(uri, **fields)
+
+    def delete_log(self, log_id):
+        """Deletes a log resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-log
+        """
+        uri = '/log/logs/%s' % log_id
+        return self.delete_resource(uri)
+
+    def list_logs(self, **filters):
+        """Lists Logs.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-logs
+        """
+        uri = '/log/logs'
+        return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/loggable_resource_client.py b/tempest/lib/services/network/loggable_resource_client.py
new file mode 100644
index 0000000..774046f
--- /dev/null
+++ b/tempest/lib/services/network/loggable_resource_client.py
@@ -0,0 +1,29 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class LoggableResourceClient(base.BaseNetworkClient):
+
+    def list_loggable_resources(self, **filters):
+        """Lists loggable resources.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-loggable-resources
+        """
+        uri = '/log/loggable-resources'
+        return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py b/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py
new file mode 100644
index 0000000..8fd87fe
--- /dev/null
+++ b/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py
@@ -0,0 +1,74 @@
+# Copyright 2021 Red Hat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosLimitBandwidthRulesClient(base.BaseNetworkClient):
+
+    def create_limit_bandwidth_rule(self, qos_policy_id, **kwargs):
+        """Creates a limit bandwidth rule for a QoS policy.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules'.format(
+            qos_policy_id)
+        post_data = {'bandwidth_limit_rule': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_limit_bandwidth_rule(self, qos_policy_id, rule_id, **kwargs):
+        """Updates a limit bandwidth rule.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        post_data = {'bandwidth_limit_rule': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_limit_bandwidth_rule(self, qos_policy_id, rule_id, **fields):
+        """Show details of a limit bandwidth rule.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-bandwidth-limit-rule-details
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        return self.show_resource(uri, **fields)
+
+    def delete_limit_bandwidth_rule(self, qos_policy_id, rule_id):
+        """Deletes a limit bandwidth rule for a QoS policy.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        return self.delete_resource(uri)
+
+    def list_limit_bandwidth_rules(self, qos_policy_id, **filters):
+        """Lists all limit bandwidth rules for a QoS policy.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-bandwidth-limit-rules-for-qos-policy
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules'.format(qos_policy_id)
+        return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py b/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py
new file mode 100644
index 0000000..98bcafe
--- /dev/null
+++ b/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py
@@ -0,0 +1,73 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosMinimumPacketRateRulesClient(base.BaseNetworkClient):
+
+    def create_minimum_packet_rate_rule(self, qos_policy_id, **kwargs):
+        """Creates a minimum packet rate rule for a QoS policy.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules' % qos_policy_id
+        post_data = {'minimum_packet_rate_rule': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_minimum_packet_rate_rule(
+        self, qos_policy_id, rule_id, **kwargs
+    ):
+        """Updates a minimum packet rate rule.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        post_data = {'minimum_packet_rate_rule': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_minimum_packet_rate_rule(self, qos_policy_id, rule_id, **fields):
+        """Show details of a minimum packet rate rule.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-minimum-packet-rate-rule-details
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        return self.show_resource(uri, **fields)
+
+    def delete_minimum_packet_rate_rule(self, qos_policy_id, rule_id):
+        """Deletes a minimum packet rate rule for a QoS policy.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        return self.delete_resource(uri)
+
+    def list_minimum_packet_rate_rules(self, qos_policy_id, **filters):
+        """Lists all minimum packet rate rules for a QoS policy.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-minimum-packet-rate-rules-for-qos-policy
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules' % qos_policy_id
+        return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
index bb82975..65e8227 100644
--- a/tempest/lib/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -28,6 +28,8 @@
             self.get_object(container, object_name)
         except exceptions.NotFound:
             return True
+        except exceptions.Conflict:
+            return False
         return False
 
     def create_object(self, container, object_name, data,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index add5c32..cbe8c20 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -329,13 +329,16 @@
         floating_ip, server = self.floating_ip_tuple
         # get internal ports' ips:
         # get all network and compute ports in the new network
+        # NOTE(ralonsoh): device_owner="network:distributed" ports are OVN
+        # metadata ports and should be filtered out.
         internal_ips = (
             p['fixed_ips'][0]['ip_address'] for p in
             self.os_admin.ports_client.list_ports(
                 project_id=server['tenant_id'],
                 network_id=network['id'])['ports']
-            if p['device_owner'].startswith('network') or
-            p['device_owner'].startswith('compute')
+            if ((p['device_owner'].startswith('network') and
+                 not p['device_owner'] == 'network:distributed') or
+                p['device_owner'].startswith('compute'))
         )
 
         self._check_server_connectivity(floating_ip,
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_network_qos_placement.py
similarity index 89%
rename from tempest/scenario/test_minbw_allocation_placement.py
rename to tempest/scenario/test_network_qos_placement.py
index 55b8d15..db4751b 100644
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -27,11 +27,13 @@
 CONF = config.CONF
 
 
-class MinBwAllocationPlacementTest(manager.NetworkScenarioTest):
+class NetworkQoSPlacementTestBase(manager.NetworkScenarioTest):
+    """Base class for Network QoS testing
+
+    Base class for testing Network QoS scenarios involving placement
+    resource allocations.
+    """
     credentials = ['primary', 'admin']
-    required_extensions = ['port-resource-request',
-                           'qos',
-                           'qos-bw-minimum-ingress']
     # The feature QoS minimum bandwidth allocation in Placement API depends on
     # Granular resource requests to GET /allocation_candidates and Support
     # allocation candidates with nested resource providers features in
@@ -46,21 +48,18 @@
     compute_min_microversion = '2.72'
     compute_max_microversion = 'latest'
 
-    INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
     INGRESS_DIRECTION = 'ingress'
+    BW_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
 
-    SMALLEST_POSSIBLE_BW = 1
     # For any realistic inventory value (that is inventory != MAX_INT) an
     # allocation candidate request of MAX_INT is expected to be rejected, see:
     # https://github.com/openstack/placement/blob/master/placement/
     # db/constants.py#L16
     PLACEMENT_MAX_INT = 0x7FFFFFFF
-    BANDWIDTH_1 = 1000
-    BANDWIDTH_2 = 2000
 
     @classmethod
     def setup_clients(cls):
-        super(MinBwAllocationPlacementTest, cls).setup_clients()
+        super().setup_clients()
         cls.placement_client = cls.os_admin.placement_client
         cls.networks_client = cls.os_admin.networks_client
         cls.subnets_client = cls.os_admin.subnets_client
@@ -69,7 +68,31 @@
         cls.qos_client = cls.os_admin.qos_client
         cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
         cls.flavors_client = cls.os_adm.flavors_client
-        cls.servers_client = cls.os_adm.servers_client
+        cls.servers_client = cls.os_primary.servers_client
+
+    def _create_flavor_to_resize_to(self):
+        old_flavor = self.flavors_client.show_flavor(
+            CONF.compute.flavor_ref)['flavor']
+        new_flavor = self.flavors_client.create_flavor(**{
+            'ram': old_flavor['ram'],
+            'vcpus': old_flavor['vcpus'],
+            'name': old_flavor['name'] + 'extra',
+            'disk': old_flavor['disk'] + 1
+        })['flavor']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.flavors_client.delete_flavor, new_flavor['id'])
+        return new_flavor
+
+
+class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase):
+
+    required_extensions = ['port-resource-request',
+                           'qos',
+                           'qos-bw-minimum-ingress']
+
+    SMALLEST_POSSIBLE_BW = 1
+    BANDWIDTH_1 = 1000
+    BANDWIDTH_2 = 2000
 
     @classmethod
     def skip_checks(cls):
@@ -143,23 +166,20 @@
 
     def _check_if_allocation_is_possible(self):
         alloc_candidates = self.placement_client.list_allocation_candidates(
-            resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+            resources1='%s:%s' % (self.BW_RESOURCE_CLASS,
                                   self.SMALLEST_POSSIBLE_BW))
         if len(alloc_candidates['provider_summaries']) == 0:
-            # Skip if the backend does not support QoS minimum bandwidth
-            # allocation in Placement API
-            raise self.skipException(
-                'No allocation candidates are available for %s:%s' %
-                (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
+            self.fail('No allocation candidates are available for %s:%s' %
+                      (self.BW_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
 
         # Just to be sure check with impossible high (placement max_int),
         # allocation
         alloc_candidates = self.placement_client.list_allocation_candidates(
-            resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+            resources1='%s:%s' % (self.BW_RESOURCE_CLASS,
                                   self.PLACEMENT_MAX_INT))
         if len(alloc_candidates['provider_summaries']) != 0:
             self.fail('For %s:%s there should be no available candidate!' %
-                      (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+                      (self.BW_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
 
     def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
         wait_until = (None if status == 'ERROR' else status)
@@ -169,7 +189,7 @@
         server = self.create_server(networks=[{'port': port['id']}],
                                     wait_until=wait_until)
         waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
+            client=self.servers_client, server_id=server['id'],
             status=status, ready_wait=False, raise_on_error=False)
         return server, port
 
@@ -179,11 +199,12 @@
             consumer)['allocations']
         self.assertGreater(len(allocations), 0)
         bw_resource_in_alloc = False
+        allocation_rp = None
         for rp, resources in allocations.items():
-            if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+            if self.BW_RESOURCE_CLASS in resources['resources']:
                 self.assertEqual(
                     min_kbps,
-                    resources['resources'][self.INGRESS_RESOURCE_CLASS])
+                    resources['resources'][self.BW_RESOURCE_CLASS])
                 bw_resource_in_alloc = True
                 allocation_rp = rp
         if min_kbps:
@@ -193,9 +214,19 @@
             # the rp uuid
             for port_id in port_ids:
                 port = self.os_admin.ports_client.show_port(port_id)
-                self.assertEqual(
-                    allocation_rp,
-                    port['port']['binding:profile']['allocation'])
+                port_binding_alloc = port['port']['binding:profile'][
+                    'allocation']
+                # NOTE(gibi): the format of the allocation key depends on the
+                # existence of port-resource-request-groups API extension.
+                # TODO(gibi): drop the else branch once tempest does not need
+                # to support Xena release any more.
+                if utils.is_extension_enabled(
+                        'port-resource-request-groups', 'network'):
+                    self.assertEqual(
+                        {allocation_rp},
+                        set(port_binding_alloc.values()))
+                else:
+                    self.assertEqual(allocation_rp, port_binding_alloc)
 
     @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
     @utils.services('compute', 'network')
@@ -259,9 +290,9 @@
         self._assert_allocation_is_as_expected(server['id'],
                                                [valid_port['id']])
 
-        self.servers_client.migrate_server(server_id=server['id'])
+        self.os_adm.servers_client.migrate_server(server_id=server['id'])
         waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
+            client=self.servers_client, server_id=server['id'],
             status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
 
         # TODO(lajoskatona): Check that the allocations are ok for the
@@ -269,9 +300,10 @@
         self._assert_allocation_is_as_expected(server['id'],
                                                [valid_port['id']])
 
-        self.servers_client.confirm_resize_server(server_id=server['id'])
+        self.os_adm.servers_client.confirm_resize_server(
+            server_id=server['id'])
         waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
+            client=self.servers_client, server_id=server['id'],
             status='ACTIVE', ready_wait=False, raise_on_error=True)
         self._assert_allocation_is_as_expected(server['id'],
                                                [valid_port['id']])
@@ -296,21 +328,12 @@
         self._assert_allocation_is_as_expected(server['id'],
                                                [valid_port['id']])
 
-        old_flavor = self.flavors_client.show_flavor(
-            CONF.compute.flavor_ref)['flavor']
-        new_flavor = self.flavors_client.create_flavor(**{
-            'ram': old_flavor['ram'],
-            'vcpus': old_flavor['vcpus'],
-            'name': old_flavor['name'] + 'extra',
-            'disk': old_flavor['disk'] + 1
-        })['flavor']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.flavors_client.delete_flavor, new_flavor['id'])
+        new_flavor = self._create_flavor_to_resize_to()
 
         self.servers_client.resize_server(
             server_id=server['id'], flavor_ref=new_flavor['id'])
         waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
+            client=self.servers_client, server_id=server['id'],
             status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
 
         # TODO(lajoskatona): Check that the allocations are ok for the
@@ -320,7 +343,7 @@
 
         self.servers_client.confirm_resize_server(server_id=server['id'])
         waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
+            client=self.servers_client, server_id=server['id'],
             status='ACTIVE', ready_wait=False, raise_on_error=True)
         self._assert_allocation_is_as_expected(server['id'],
                                                [valid_port['id']])
diff --git a/tempest/scenario/test_unified_limits.py b/tempest/scenario/test_unified_limits.py
new file mode 100644
index 0000000..22256b4
--- /dev/null
+++ b/tempest/scenario/test_unified_limits.py
@@ -0,0 +1,435 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import io
+
+from oslo_utils import units
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+
+class ImageQuotaTest(manager.ScenarioTest):
+    credentials = ['primary', 'system_admin']
+
+    @classmethod
+    def resource_setup(cls):
+        super(ImageQuotaTest, cls).resource_setup()
+
+        # Figure out and record the glance service id
+        services = cls.os_system_admin.identity_services_v3_client.\
+            list_services()
+        glance_services = [x for x in services['services']
+                           if x['name'] == 'glance']
+        cls.glance_service_id = glance_services[0]['id']
+
+        # Pre-create all the quota limits and record their IDs so we can
+        # update them in-place without needing to know which ones have been
+        # created and in which order.
+        cls.limit_ids = {}
+
+        try:
+            cls.limit_ids['image_size_total'] = cls._create_limit(
+                'image_size_total', 10)
+            cls.limit_ids['image_stage_total'] = cls._create_limit(
+                'image_stage_total', 10)
+            cls.limit_ids['image_count_total'] = cls._create_limit(
+                'image_count_total', 10)
+            cls.limit_ids['image_count_uploading'] = cls._create_limit(
+                'image_count_uploading', 10)
+        except lib_exc.Forbidden:
+            # If we fail to set limits, it means they are not
+            # registered, and thus we will skip these tests once we
+            # have our os_system_admin client and run
+            # check_quotas_enabled().
+            pass
+
+    def setUp(self):
+        super(ImageQuotaTest, self).setUp()
+        self.created_images = []
+
+    def create_image(self, data=None, **kwargs):
+        """Wrapper that returns a test image."""
+
+        if 'name' not in kwargs:
+            name = data_utils.rand_name(self.__name__ + "-image")
+            kwargs['name'] = name
+
+        params = dict(kwargs)
+        if data:
+            # NOTE: On the glance v1 API, the data should be passed in
+            # a header, so this method handles the data separately.
+            params['data'] = data
+
+        image = self.image_client.create_image(**params)
+        # Image objects returned by the v1 client have the image
+        # data inside a dict that is keyed against 'image'.
+        if 'image' in image:
+            image = image['image']
+        self.created_images.append(image['id'])
+        self.addCleanup(
+            self.image_client.wait_for_resource_deletion,
+            image['id'])
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.image_client.delete_image, image['id'])
+        return image
+
+    def check_quotas_enabled(self):
+        # Check to see if we should even be running these tests. Use
+        # the presence of a registered limit that we recognize as an
+        # indication.  This will be set up by the operator (or
+        # devstack) if glance is configured to use/honor the unified
+        # limits. If one is set, they must all be set, because glance
+        # has a single all-or-nothing flag for whether or not to use
+        # keystone limits. If anything, checking only one helps to
+        # assert the assumption that, if enabled, they must all be at
+        # least registered for proper operation.
+        registered_limits = self.os_system_admin.identity_limits_client.\
+            get_registered_limits()['registered_limits']
+        if 'image_count_total' not in [x['resource_name']
+                                       for x in registered_limits]:
+            raise self.skipException('Target system is not configured with '
+                                     'glance unified limits')
+
+    @classmethod
+    def _create_limit(cls, name, value):
+        return cls.os_system_admin.identity_limits_client.create_limit(
+            CONF.identity.region, cls.glance_service_id,
+            cls.image_client.tenant_id, name, value)['limits'][0]['id']
+
+    def _update_limit(self, name, value):
+        self.os_system_admin.identity_limits_client.update_limit(
+            self.limit_ids[name], value)
+
+    def _cleanup_images(self):
+        while self.created_images:
+            image_id = self.created_images.pop()
+            try:
+                self.image_client.delete_image(image_id)
+            except lib_exc.NotFound:
+                pass
+
+    @decorators.idempotent_id('9b74fe24-183b-41e6-bf42-84c2958a7be8')
+    @utils.services('image', 'identity')
+    def test_image_count_quota(self):
+        self.check_quotas_enabled()
+
+        # Set a quota on the number of images for our tenant to one.
+        self._update_limit('image_count_total', 1)
+
+        # Create one image
+        image = self.create_image(name='first',
+                                  container_format='bare',
+                                  disk_format='raw',
+                                  visibility='private')
+
+        # Second image would put us over quota, so expect failure.
+        self.assertRaises(lib_exc.OverLimit,
+                          self.create_image,
+                          name='second',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Update our limit to two.
+        self._update_limit('image_count_total', 2)
+
+        # Now the same create should succeed.
+        self.create_image(name='second',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Third image would put us over quota, so expect failure.
+        self.assertRaises(lib_exc.OverLimit,
+                          self.create_image,
+                          name='third',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Delete the first image to put us under quota.
+        self.image_client.delete_image(image['id'])
+
+        # Now the same create should succeed.
+        self.create_image(name='third',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('b103788b-5329-4aa9-8b0d-97f8733460db')
+    @utils.services('image', 'identity')
+    def test_image_count_uploading_quota(self):
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % __name__)
+            raise self.skipException(skip_msg)
+
+        self.check_quotas_enabled()
+
+        # Set a quota on the number of images we can have in uploading state.
+        self._update_limit('image_stage_total', 10)
+        self._update_limit('image_size_total', 10)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 1)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and stage an image
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Check that we can not stage another
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image2['id'], io.BytesIO(file_content))
+
+        # ... nor upload directly
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image3['id'],
+                          io.BytesIO(file_content))
+
+        # Update our quota to make room
+        self._update_limit('image_count_uploading', 2)
+
+        # Now our upload should work
+        self.image_client.store_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # ...and because that is no longer in uploading state, we should be
+        # able to stage our second image from above.
+        self.image_client.stage_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Finish our import of image2
+        self.image_client.image_import(image2['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image2['id'])
+
+        # Set our quota back to one
+        self._update_limit('image_count_uploading', 1)
+
+        # Since image1 is still staged, we should not be able to upload
+        # an image.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image4['id'],
+                          io.BytesIO(file_content))
+
+        # Finish our import of image1 to make space in our uploading quota.
+        self.image_client.image_import(image1['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image1['id'])
+
+        # Verify the import freed up the one upload quota slot by
+        self.image_client.store_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('05e8d064-c39a-4801-8c6a-465df375ec5b')
+    @utils.services('image', 'identity')
+    def test_image_size_quota(self):
+        self.check_quotas_enabled()
+
+        # Set a quota on the image size for our tenant to 1MiB, and allow ten
+        # images.
+        self._update_limit('image_size_total', 1)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 10)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and upload a 1MiB image.
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.store_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and upload a second 1MiB image. This succeeds, but
+        # after completion, we are over quota. Despite us being at
+        # quota above, the initial quota check for the second
+        # operation has no idea what the image size will be, and thus
+        # uses delta=0. This will succeed because we're not
+        # technically over-quota and have not asked for any more (this
+        # is oslo.limit behavior). After the second operation,
+        # however, we will be over-quota regardless of the delta and
+        # subsequent attempts will fail. Because glance does not
+        # require an image size to be declared before upload, this is
+        # really the best it can do without an API change.
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.store_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to upload a third 1MiB image. This should fail to
+        # upload (but not create) because we are over quota.
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image3['id'], io.BytesIO(file_content))
+
+        # Increase our size quota to 2MiB.
+        self._update_limit('image_size_total', 2)
+
+        # Now the upload of the already-created image is allowed, but
+        # after completion, we are over quota again.
+        self.image_client.store_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to upload a fourth 1MiB image. This should
+        # fail to upload (but not create) because we are over quota.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image4['id'], io.BytesIO(file_content))
+
+        # Delete our first image to make space in our existing 2MiB quota.
+        self.image_client.delete_image(image1['id'])
+
+        # Now the upload of the already-created image is allowed.
+        self.image_client.store_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('fc76b8d9-aae5-46fb-9285-099e37f311f7')
+    @utils.services('image', 'identity')
+    def test_image_stage_quota(self):
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % __name__)
+            raise self.skipException(skip_msg)
+
+        self.check_quotas_enabled()
+
+        # Create a staging quota of 1MiB, allow 10MiB of active
+        # images, and a total of ten images.
+        self._update_limit('image_stage_total', 1)
+        self._update_limit('image_size_total', 10)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 10)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and stage a 1MiB image.
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and stage a second 1MiB image. This succeeds, but
+        # after completion, we are over quota.
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to stage a third 1MiB image. This should fail to
+        # stage (but not create) because we are over quota.
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image3['id'], io.BytesIO(file_content))
+
+        # Make sure that even though we are over our stage quota, we
+        # can still create and upload an image the regular way.
+        image_upload = self.create_image(name='uploaded',
+                                         container_format='bare',
+                                         disk_format='raw',
+                                         visibility='private')
+        self.image_client.store_image_file(image_upload['id'],
+                                           io.BytesIO(file_content))
+
+        # Increase our stage quota to two MiB.
+        self._update_limit('image_stage_total', 2)
+
+        # Now the upload of the already-created image is allowed, but
+        # after completion, we are over quota again.
+        self.image_client.stage_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to stage a fourth 1MiB image. This should
+        # fail to stage (but not create) because we are over quota.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image4['id'], io.BytesIO(file_content))
+
+        # Finish our import of image1 to make space in our stage quota.
+        self.image_client.image_import(image1['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image1['id'])
+
+        # Now the upload of the already-created image is allowed.
+        self.image_client.stage_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index f801243..5cdbfbf 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -453,11 +453,14 @@
             "volumeAttachments": [{"volumeId": uuids.volume_id}]}
         mock_list_volume_attachments = mock.Mock(
             side_effect=[volume_attached, volume_attached])
+        mock_get_console_output = mock.Mock(
+            return_value={'output': 'output'})
         mock_client = mock.Mock(
             spec=servers_client.ServersClient,
             build_interval=1,
             build_timeout=1,
-            list_volume_attachments=mock_list_volume_attachments)
+            list_volume_attachments=mock_list_volume_attachments,
+            get_console_output=mock_get_console_output)
         self.patch(
             'time.time',
             side_effect=[0., 0.5, mock_client.build_timeout + 1.])
@@ -473,3 +476,22 @@
         mock_list_volume_attachments.assert_has_calls([
             mock.call(uuids.server_id),
             mock.call(uuids.server_id)])
+
+        # Assert that we fetch console output
+        mock_get_console_output.assert_called_once_with(uuids.server_id)
+
+    def test_wait_for_volume_attachment_remove_from_server_not_found(self):
+        mock_list_volume_attachments = mock.Mock(
+            side_effect=lib_exc.NotFound)
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            list_volume_attachments=mock_list_volume_attachments)
+
+        # Assert that nothing is raised when lib_exc.NotFound is raised
+        # by the client call to list_volume_attachments
+        waiters.wait_for_volume_attachment_remove_from_server(
+            mock_client, mock.sentinel.server_id, mock.sentinel.volume_id)
+
+        # Assert that list_volume_attachments was actually called
+        mock_list_volume_attachments.assert_called_once_with(
+            mock.sentinel.server_id)
diff --git a/tempest/tests/lib/cmd/test_check_uuid.py b/tempest/tests/lib/cmd/test_check_uuid.py
index 5d63dec..edfb2c8 100644
--- a/tempest/tests/lib/cmd/test_check_uuid.py
+++ b/tempest/tests/lib/cmd/test_check_uuid.py
@@ -19,7 +19,6 @@
 from unittest import mock
 
 from tempest.lib.cmd import check_uuid
-from tempest.lib import decorators
 from tempest.tests import base
 
 
@@ -29,37 +28,33 @@
            "    def test_tests(self):\n" \
            "        pass"
 
-    def create_tests_file(self, directory):
-        with open(directory + "/__init__.py", "w"):
-            pass
+    def setUp(self):
+        super(TestCLInterface, self).setUp()
+        self.directory = tempfile.mkdtemp(prefix='check-uuid', dir=".")
+        self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True)
 
-        tests_file = directory + "/tests.py"
-        with open(tests_file, "w") as fake_file:
+        init_file = open(self.directory + "/__init__.py", "w")
+        init_file.close()
+
+        self.tests_file = self.directory + "/tests.py"
+        with open(self.tests_file, "w") as fake_file:
             fake_file.write(TestCLInterface.CODE)
-
-        return tests_file
+            fake_file.close()
 
     def test_fix_argument_no(self):
-        temp_dir = tempfile.mkdtemp(prefix='check-uuid-no', dir=".")
-        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
-        tests_file = self.create_tests_file(temp_dir)
         sys.argv = [sys.argv[0]] + ["--package",
-                                    os.path.relpath(temp_dir)]
+                                    os.path.relpath(self.directory)]
 
         self.assertRaises(SystemExit, check_uuid.run)
-        with open(tests_file, "r") as f:
+        with open(self.tests_file, "r") as f:
             self.assertTrue(TestCLInterface.CODE == f.read())
 
-    @decorators.skip_because(bug='1918316')
     def test_fix_argument_yes(self):
-        temp_dir = tempfile.mkdtemp(prefix='check-uuid-yes', dir=".")
-        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
-        tests_file = self.create_tests_file(temp_dir)
 
         sys.argv = [sys.argv[0]] + ["--fix", "--package",
-                                    os.path.relpath(temp_dir)]
+                                    os.path.relpath(self.directory)]
         check_uuid.run()
-        with open(tests_file, "r") as f:
+        with open(self.tests_file, "r") as f:
             self.assertTrue(TestCLInterface.CODE != f.read())
 
 
diff --git a/tempest/tests/lib/services/identity/v3/test_limit_client.py b/tempest/tests/lib/services/identity/v3/test_limit_client.py
new file mode 100644
index 0000000..07ec6cd
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_limit_client.py
@@ -0,0 +1,82 @@
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import limits_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLimitsClient(base.BaseServiceTest):
+    def setUp(self):
+        super(TestLimitsClient, self).setUp()
+        self.client = limits_client.LimitsClient(
+            fake_auth_provider.FakeAuthProvider(),
+            'identity', 'regionOne')
+
+    def test_get_registered_limits(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.get_registered_limits,
+            'tempest.lib.common.rest_client.RestClient.get',
+            fake_result,
+            False,
+            status=200)
+
+    def test_create_limit(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.create_limit,
+            'tempest.lib.common.rest_client.RestClient.post',
+            fake_result,
+            False,
+            region_id='regionOne', service_id='image',
+            project_id='project', resource_name='widgets',
+            default_limit=10,
+            description='Spacely Widgets',
+            status=201)
+
+    def test_create_limit_with_domain(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.create_limit,
+            'tempest.lib.common.rest_client.RestClient.post',
+            fake_result,
+            False,
+            region_id='regionOne', service_id='image',
+            project_id='project', resource_name='widgets',
+            default_limit=10,
+            domain_id='foo',
+            description='Spacely Widgets',
+            status=201)
+
+    def test_update_limit(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.update_limit,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            fake_result,
+            False,
+            limit_id='123', resource_limit=20,
+            status=200)
+
+    def test_update_limit_with_description(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.update_limit,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            fake_result,
+            False,
+            limit_id='123', resource_limit=20,
+            description='new description',
+            status=200)
diff --git a/tempest/tests/lib/services/network/test_log_resource_client.py b/tempest/tests/lib/services/network/test_log_resource_client.py
new file mode 100644
index 0000000..ef502bc
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_log_resource_client.py
@@ -0,0 +1,145 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import log_resource_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLogResourceClient(base.BaseServiceTest):
+
+    FAKE_LOGS = {
+        "logs": [
+            {
+                "name": "security group log1",
+                "description": "Log for test demo.",
+                "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
+                "project_id": "92a5a4f4245a4abbafacb7ca73b027b0",
+                "tenant_id": "92a5a4f4245a4abbafacb7ca73b027b0",
+                "created_at": "2018-04-03T21:03:04Z",
+                "updated_at": "2018-04-03T21:03:04Z",
+                "enabled": True,
+                "revision_number": 1,
+                "resource_type": "security_group",
+                "resource_id": None,
+                "target_id": None,
+                "event": "ALL"
+            },
+            {
+                "name": "security group log2",
+                "description": "Log for test demo.",
+                "id": "46ebaec1-0570-43ac-82f6-60d2b03168c4",
+                "project_id": "82a5a4f4245a4abbafacb7ca73b027b0",
+                "tenant_id": "82a5a4f4245a4abbafacb7ca73b027b0",
+                "created_at": "2018-04-03T21:04:04Z",
+                "updated_at": "2018-04-03T21:04:04Z",
+                "enabled": True,
+                "revision_number": 2,
+                "resource_type": "security_group",
+                "resource_id": None,
+                "target_id": None,
+                "event": "ALL"
+            }
+        ]
+    }
+
+    FAKE_LOG_ID = "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+
+    def setUp(self):
+        super(TestLogResourceClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.log_resource_client = log_resource_client.LogResourceClient(
+            fake_auth, "network", "regionOne")
+
+    def _test_list_logs(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.list_logs,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_LOGS,
+            bytes_body,
+            200)
+
+    def _test_show_log(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.show_log,
+            "tempest.lib.common.rest_client.RestClient.get",
+            {"log": self.FAKE_LOGS["logs"][0]},
+            bytes_body,
+            200,
+            log_id=self.FAKE_LOG_ID)
+
+    def _test_create_log(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.create_log,
+            "tempest.lib.common.rest_client.RestClient.post",
+            {"logs": self.FAKE_LOGS["logs"][1]},
+            bytes_body,
+            201,
+            log_id="2f245a7b-796b-4f26-9cf9-9e82d248fda7")
+
+    def _test_update_log(self, bytes_body=False):
+        update_kwargs = {
+            "tenant_id": "83a5a4f4245a4abbafacb7ca73b027b0"
+        }
+
+        resp_body = {
+            "logs": copy.deepcopy(
+                self.FAKE_LOGS["logs"][0]
+            )
+        }
+        resp_body["logs"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.log_resource_client.update_log,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            log_id=self.FAKE_LOG_ID,
+            **update_kwargs)
+
+    def test_list_logs_with_str_body(self):
+        self._test_list_logs()
+
+    def test_list_logs_with_bytes_body(self):
+        self._test_list_logs(bytes_body=True)
+
+    def test_create_log_with_str_body(self):
+        self._test_create_log()
+
+    def test_create_log_with_bytes_body(self):
+        self._test_create_log(bytes_body=True)
+
+    def test_show_log_with_str_body(self):
+        self._test_show_log()
+
+    def test_show_log_with_bytes_body(self):
+        self._test_show_log(bytes_body=True)
+
+    def test_update_log_with_str_body(self):
+        self._test_update_log()
+
+    def test_update_log_with_bytes_body(self):
+        self._test_update_log(bytes_body=True)
+
+    def test_delete_log(self):
+        self.check_service_client_function(
+            self.log_resource_client.delete_log,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            status=204,
+            log_id=self.FAKE_LOG_ID)
diff --git a/tempest/tests/lib/services/network/test_loggable_resource_client.py b/tempest/tests/lib/services/network/test_loggable_resource_client.py
new file mode 100644
index 0000000..232775b
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_loggable_resource_client.py
@@ -0,0 +1,53 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import loggable_resource_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLoggableResourceClient(base.BaseServiceTest):
+
+    FAKE_LOGS = {
+        "loggable_resources": [
+            {
+                "type": "security_group"
+            },
+            {
+                "type": "none"
+            }
+        ]
+    }
+
+    def setUp(self):
+        super(TestLoggableResourceClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.loggable_resource_client = \
+            loggable_resource_client.LoggableResourceClient(
+                fake_auth, "network", "regionOne")
+
+    def _test_list_loggable_resources(self, bytes_body=False):
+        self.check_service_client_function(
+            self.loggable_resource_client.list_loggable_resources,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_LOGS,
+            bytes_body,
+            200)
+
+    def test_list_loggable_resources_with_str_body(self):
+        self._test_list_loggable_resources()
+
+    def test_list_loggable_resources_with_bytes_body(self):
+        self._test_list_loggable_resources(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py b/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py
new file mode 100644
index 0000000..e83792d
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py
@@ -0,0 +1,124 @@
+# Copyright 2021 Red Hat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib import decorators
+
+from tempest.lib.services.network import qos_limit_bandwidth_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+from oslo_log import log as logging
+LOG = logging.getLogger('tempest')
+
+
+class TestQosLimitBandwidthRulesClient(base.BaseServiceTest):
+
+    FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+    FAKE_MAX_BW_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
+
+    FAKE_MAX_BW_RULE_REQUEST = {
+        'qos_policy_id': FAKE_QOS_POLICY_ID,
+        'max_kbps': 1000,
+        'max_burst_kbps': 0,
+        'direction': 'ingress'
+    }
+
+    FAKE_MAX_BW_RULE_RESPONSE = {
+        'bandwidth_limit_rule': {
+            'id': FAKE_MAX_BW_RULE_ID,
+            'max_kbps': 10000,
+            'max_burst_kbps': 0,
+            'direction': 'ingress'
+        }
+    }
+
+    FAKE_MAX_BW_RULES = {
+        'bandwidth_limit_rules': [
+            FAKE_MAX_BW_RULE_RESPONSE['bandwidth_limit_rule']
+        ]
+    }
+
+    def setUp(self):
+        super(TestQosLimitBandwidthRulesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.qos_limit_bw_client = qos_limit_bandwidth_rules_client.\
+            QosLimitBandwidthRulesClient(fake_auth, "network", "regionOne")
+
+    @decorators.idempotent_id('cde981fa-e93b-11eb-aacb-74e5f9e2a801')
+    def test_create_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.create_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.post",
+            self.FAKE_MAX_BW_RULE_RESPONSE,
+            bytes_body,
+            201,
+            **self.FAKE_MAX_BW_RULE_REQUEST
+        )
+
+    @decorators.idempotent_id('86e6803a-e974-11eb-aacb-74e5f9e2a801')
+    def test_update_limit_bandwidth_rules(self, bytes_body=False):
+        update_kwargs = {
+            "max_kbps": "20000"
+        }
+
+        resp_body = {
+            "bandwidth_limit_rule": copy.deepcopy(
+                self.FAKE_MAX_BW_RULE_RESPONSE['bandwidth_limit_rule']
+            )
+        }
+        resp_body["bandwidth_limit_rule"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.qos_limit_bw_client.update_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID,
+            **update_kwargs)
+
+    @decorators.idempotent_id('be60ae6e-e979-11eb-aacb-74e5f9e2a801')
+    def test_show_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.show_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MAX_BW_RULE_RESPONSE,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID
+        )
+
+    @decorators.idempotent_id('0a7c0964-e97b-11eb-aacb-74e5f9e2a801')
+    def test_delete_limit_bandwidth_rule(self):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.delete_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID)
+
+    @decorators.idempotent_id('08df88ae-e97d-11eb-aacb-74e5f9e2a801')
+    def test_list_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.list_limit_bandwidth_rules,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MAX_BW_RULES,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID
+        )
diff --git a/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py b/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py
new file mode 100644
index 0000000..3cc3de3
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py
@@ -0,0 +1,135 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import qos_minimum_packet_rate_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestQosMinimumPacketRateRulesClient(base.BaseServiceTest):
+
+    FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+    FAKE_MIN_PPS_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
+
+    FAKE_MIN_PPS_RULE_REQUEST = {
+        'qos_policy_id': FAKE_QOS_POLICY_ID,
+        'min_kpps': 1000,
+        'direction': 'ingress'
+    }
+
+    FAKE_MIN_PPS_RULE_RESPONSE = {
+        'minimum_packet_rate_rule': {
+            'id': FAKE_MIN_PPS_RULE_ID,
+            'min_kpps': 1000,
+            'direction': 'ingress'
+        }
+    }
+
+    FAKE_MIN_PPS_RULES = {
+        'minimum_packet_rate_rules': [
+            FAKE_MIN_PPS_RULE_RESPONSE['minimum_packet_rate_rule']
+        ]
+    }
+
+    def setUp(self):
+        super(TestQosMinimumPacketRateRulesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.qos_min_pps_client = qos_minimum_packet_rate_rules_client.\
+            QosMinimumPacketRateRulesClient(fake_auth, "network", "regionOne")
+
+    def _test_create_minimum_packet_rate_rule(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.create_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.post",
+            self.FAKE_MIN_PPS_RULE_RESPONSE,
+            bytes_body,
+            201,
+            **self.FAKE_MIN_PPS_RULE_REQUEST
+        )
+
+    def _test_list_minimum_packet_rate_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.list_minimum_packet_rate_rules,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MIN_PPS_RULES,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID
+        )
+
+    def _test_show_minimum_packet_rate_rule(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.show_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MIN_PPS_RULE_RESPONSE,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID
+        )
+
+    def _test_update_qos_policy(self, bytes_body=False):
+        update_kwargs = {
+            "min_kpps": "20000"
+        }
+
+        resp_body = {
+            "minimum_packet_rate_rule": copy.deepcopy(
+                self.FAKE_MIN_PPS_RULE_RESPONSE['minimum_packet_rate_rule']
+            )
+        }
+        resp_body["minimum_packet_rate_rule"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.qos_min_pps_client.update_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID,
+            **update_kwargs)
+
+    def test_create_minimum_packet_rate_rule_with_str_body(self):
+        self._test_create_minimum_packet_rate_rule()
+
+    def test_create_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_create_minimum_packet_rate_rule(bytes_body=True)
+
+    def test_update_minimum_packet_rate_rule_with_str_body(self):
+        self._test_update_qos_policy()
+
+    def test_update_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_update_qos_policy(bytes_body=True)
+
+    def test_show_minimum_packet_rate_rule_with_str_body(self):
+        self._test_show_minimum_packet_rate_rule()
+
+    def test_show_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_show_minimum_packet_rate_rule(bytes_body=True)
+
+    def test_delete_minimum_packet_rate_rule(self):
+        self.check_service_client_function(
+            self.qos_min_pps_client.delete_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID)
+
+    def test_list_minimum_packet_rate_rule_with_str_body(self):
+        self._test_list_minimum_packet_rate_rules()
+
+    def test_list_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_list_minimum_packet_rate_rules(bytes_body=True)
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 1b5b369..b96bbe4 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -35,28 +35,48 @@
 # TODO(masayukig): Some of these can be removed from NON_ACTIVE_LIST in the
 # future when the patches are merged.
 NON_ACTIVE_LIST = [
-    'x/gce-api',  # It looks gce-api doesn't support python3 yet.
+    'x/gce-api',  # It looks gce-api doesn't support python3 yet
+    # https://bugs.launchpad.net/gce-api/+bug/1931094
     'x/glare',  # To avoid sanity-job failure
-    'x/group-based-policy',  # It looks this doesn't support python3 yet.
-    'x/intel-nfv-ci-tests',  # https://review.opendev.org/#/c/634640/
+    'x/group-based-policy',
+    # https://bugs.launchpad.net/group-based-policy/+bug/1931091
+    'x/intel-nfv-ci-tests',  # To avoid sanity-job failure
     'openstack/networking-generic-switch',
+    # This is not a real tempest plugin,
     # https://review.opendev.org/#/c/634846/
-    'x/networking-l2gw-tempest-plugin',
-    # https://review.opendev.org/#/c/635093/
-    'openstack/networking-midonet',  # https://review.opendev.org/#/c/635096/
-    'x/networking-plumgrid',  # https://review.opendev.org/#/c/635096/
+    'x/networking-plumgrid',  # No longer contains tempest tests
     'x/networking-spp',  # https://review.opendev.org/#/c/635098/
+    # networking-spp is missing neutron-tempest-plugin as a dep plus
+    # test-requirements.txt is nested in a openstack dir and sanity script
+    # doesn't count with such scenario yet
     'openstack/neutron-dynamic-routing',
+    # As tests have been migrated to neutron-tempest-plugin:
     # https://review.opendev.org/#/c/637718/
-    'openstack/neutron-vpnaas',  # https://review.opendev.org/#/c/637719/
-    'x/tap-as-a-service',  # To avoid sanity-job failure
-    'x/valet',  # https://review.opendev.org/#/c/638339/
-    'x/kingbird',  # https://bugs.launchpad.net/kingbird/+bug/1869722
-    # vmware-nsx is excluded since https://review.opendev.org/#/c/736952
-    'x/vmware-nsx-tempest-plugin',
+    'openstack/neutron-vpnaas',
+    # As tests have been migrated to neutron-tempest-plugin:
+    # https://review.opendev.org/c/openstack/neutron-vpnaas/+/695834
+    'x/valet',  # valet is unmaintained now
+    # https://review.opendev.org/c/x/valet/+/638339
+    'x/kingbird',  # kingbird is unmaintained now
+    # https://bugs.launchpad.net/kingbird/+bug/1869722
+    'x/mogan',
     # mogan is unmaintained now, remove from the list when this is merged:
     # https://review.opendev.org/c/x/mogan/+/767718
-    'x/mogan',
+    'x/vmware-nsx-tempest-plugin',
+    # Failing since 2021-08-27
+    # https://zuul.opendev.org/t/openstack/build
+    # /45f6c8d3c62d4387a70b7b471ec687c8
+    # Below plugins failing for error in psycopg2 __init__
+    # ImportError: libpq.so.5: cannot open shared object
+    # file: No such file or directory
+    # https://zuul.opendev.org/t/openstack/build
+    # /b61a48196dfa476d83645aea4853e544/log/job-output.txt#271722
+    # Failing since 2021-09-08
+    'x/networking-l2gw-tempest-plugin',
+    'x/novajoin-tempest-plugin',
+    'x/ranger-tempest-plugin',
+    'x/tap-as-a-service-tempest-plugin',
+    'x/trio2o',
 ]
 
 url = 'https://review.opendev.org/projects/'
diff --git a/tox.ini b/tox.ini
index cd32174..efdaacc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -125,6 +125,18 @@
     find . -type f -name "*.pyc" -delete
     tempest run --regex '(^tempest\.scenario.*)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)' {posargs}
 
+[testenv:api-microversion-tests]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select all tempest api tests for services having API
+# microversion concept.
+commands =
+    find . -type f -name "*.pyc" -delete
+    tempest run --regex '(^tempest\.api\.compute)|(^tempest\.api\.volume)' {posargs}
+
 [testenv:integrated-network]
 envdir = .tox/tempest
 sitepackages = {[tempestenv]sitepackages}
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 622bbad..19c25c7 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -82,21 +82,9 @@
         GLANCE_USE_IMPORT_WORKFLOW: True
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
       devstack_services:
         # Enbale horizon so that we can run horizon test.
         horizon: true
-        neutron-placement: true
-        neutron-qos: true
 
 - job:
     name: tempest-integrated-networking
@@ -141,6 +129,20 @@
         c-bak: false
 
 - job:
+    name: tempest-integrated-compute-centos-8-stream
+    parent: tempest-integrated-compute
+    nodeset: devstack-single-node-centos-8-stream
+    description: |
+      This job runs integration tests for compute. This is
+      subset of 'tempest-full-py3' job and run Nova, Neutron, Cinder (except backup tests)
+      and Glance related tests. This is meant to be run on Nova gate only.
+      This version of the job also uses CentOS 8 stream.
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
+
+- job:
     name: tempest-integrated-placement
     parent: devstack-tempest
     branches: ^(?!stable/ocata).*$
@@ -194,6 +196,20 @@
         USE_PYTHON3: true
 
 - job:
+    name: tempest-with-latest-microversion
+    parent: tempest-full-py3
+    description: |
+      This job runs compute, placement and volume API tests with 'latest'
+      API microversion (This can be extended to other services having API
+      microversion concept).
+    vars:
+      tox_envlist: api-microversion-tests
+      devstack_localrc:
+        TEMPEST_COMPUTE_MIN_MICROVERSION: 'latest'
+        TEMPEST_VOLUME_MIN_MICROVERSION: 'latest'
+        TEMPEST_PLACEMENT_MIN_MICROVERSION: 'latest'
+
+- job:
     name: tempest-multinode-full
     parent: tempest-multinode-full-base
     nodeset: openstack-two-node-focal
@@ -215,30 +231,12 @@
         USE_PYTHON3: true
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
       devstack_services:
-        neutron-placement: true
-        neutron-qos: true
         neutron-trunk: true
     group-vars:
       subnode:
         devstack_localrc:
           USE_PYTHON3: true
-        devstack_local_conf:
-          post-config:
-            "/$NEUTRON_CORE_PLUGIN_CONF":
-              ovs:
-                bridge_mappings: public:br-ex
-                resource_provider_bandwidths: br-ex:1000000:1000000
 
 - job:
     name: tempest-slow
@@ -254,7 +252,7 @@
     timeout: 10800
     # This job runs on stable/stein onwards.
     branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
-    vars:
+    vars: &tempest_slow_vars
       tox_envlist: slow-serial
       devstack_localrc:
         CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
@@ -275,10 +273,12 @@
 
 - job:
     name: tempest-slow-py3
-    parent: tempest-slow
+    parent: tempest-multinode-full-py3
     # This job version is with swift enabled on py3
     # as swift is ready on py3 from stable/ussuri onwards.
+    timeout: 10800
     branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    vars: *tempest_slow_vars
 
 - job:
     name: tempest-cinder-v2-api
@@ -332,9 +332,11 @@
     check:
       jobs:
         - tempest-integrated-compute
+        - tempest-integrated-compute-centos-8-stream
     gate:
       jobs:
         - tempest-integrated-compute
+        - tempest-integrated-compute-centos-8-stream
 
 - project-template:
     name: integrated-gate-placement
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 698df53..0d6178d 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -31,6 +31,8 @@
         - glance-multistore-cinder-import:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-xena:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-wallaby-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-victoria-py3:
@@ -54,9 +56,44 @@
               - ^.gitignore$
               - ^.gitreview$
               - ^.mailmap$
+              - ^tools/check_logs.py
+              - ^tools/format.sh
+              - ^tools/skip_tracker.py
+              - ^tools/tempest-integrated-gate-compute-exclude-list.txt
+              - ^tools/tempest-integrated-gate-networking-exclude-list.txt
+              - ^tools/tempest-integrated-gate-object-storage-exclude-list.txt
+              - ^tools/tempest-integrated-gate-placement-exclude-list.txt
+              - ^tools/tempest-integrated-gate-storage-blacklist.txt
+              - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+              - ^tools/verify-ipv6-only-deployments.sh
+              - ^tools/with_venv.sh
               # tools/ is not here since this relies on a script in tools/.
         - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
+            irrelevant-files: &tempest-irrelevant-files-3
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^tools/check_logs.py
+              - ^tools/format.sh
+              - ^tools/generate-tempest-plugins-list.py
+              - ^tools/generate-tempest-plugins-list.sh
+              - ^tools/skip_tracker.py
+              - ^tools/tempest-integrated-gate-compute-exclude-list.txt
+              - ^tools/tempest-integrated-gate-networking-exclude-list.txt
+              - ^tools/tempest-integrated-gate-object-storage-exclude-list.txt
+              - ^tools/tempest-integrated-gate-placement-exclude-list.txt
+              - ^tools/tempest-integrated-gate-storage-blacklist.txt
+              - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+              - ^tools/tempest-plugin-sanity.sh
+              - ^tools/with_venv.sh
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
         - nova-live-migration:
@@ -64,23 +101,11 @@
             irrelevant-files: *tempest-irrelevant-files
         - devstack-plugin-ceph-tempest-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
+        - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
         - grenade:
             irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario001-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario002-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario003-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario004-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr:
+        - neutron-ovs-tempest-dvr:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
         - interop-tempest-consistency:
@@ -93,28 +118,33 @@
             irrelevant-files: *tempest-irrelevant-files
         - openstack-tox-bashate:
             irrelevant-files: *tempest-irrelevant-files-2
+        - tempest-full-py3-centos-8-stream:
+            irrelevant-files: *tempest-irrelevant-files
     gate:
       jobs:
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
+        - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-centos-8-stream:
+            irrelevant-files: *tempest-irrelevant-files
         - grenade:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
+            irrelevant-files: *tempest-irrelevant-files-3
         - devstack-plugin-ceph-tempest-py3:
             irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
+        - tempest-with-latest-microversion
         - tempest-stestr-master
         - tempest-cinder-v2-api:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-all:
             irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr-ha-multinode-full:
+        - neutron-ovs-tempest-dvr-ha-multinode-full:
             irrelevant-files: *tempest-irrelevant-files
         - nova-tempest-v2-api:
             irrelevant-files: *tempest-irrelevant-files
@@ -126,6 +156,7 @@
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-xena
         - tempest-full-wallaby-py3
         - tempest-full-victoria-py3
         - tempest-full-ussuri-py3
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 852bafb..e682457 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,10 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-xena
+    parent: tempest-full-py3
+    override-checkout: stable/xena
+
+- job:
     name: tempest-full-wallaby-py3
     parent: tempest-full-py3
     override-checkout: stable/wallaby
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 5063d89..051d8b0 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -78,6 +78,18 @@
     voting: false
 
 - job:
+    name: tempest-full-py3-centos-8-stream
+    parent: tempest-full-py3
+    nodeset: devstack-single-node-centos-8-stream
+    description: |
+      Base integration test with Neutron networking and py36 running
+      on CentOS 8 stream
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
+
+- job:
     name: tempest-tox-plugin-sanity-check
     parent: tox
     description: |