Merge "Tests for nova unified quotas"
diff --git a/HACKING.rst b/HACKING.rst
index 95bcbb5..dc28e4e 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -26,6 +26,7 @@
 - [T116] Unsupported 'message' Exception attribute in PY3
 - [T117] Check negative tests have ``@decorators.attr(type=['negative'])``
   applied.
+- [T118] LOG.warn is deprecated. Enforce use of LOG.warning.
 
 It is recommended to use ``tox -eautopep8`` before submitting a patch.
 
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 0b80b72..f5da6f9 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -350,6 +350,10 @@
 
   .. _2.32: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id29
 
+  * `2.33`_
+
+  .. _2.33: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id30
+
   * `2.36`_
 
   .. _2.36: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#microversion
@@ -382,6 +386,10 @@
 
   .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id45
 
+  * `2.50`_
+
+  .. _2.50: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id46
+
   * `2.53`_
 
   .. _2.53: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-pike
@@ -414,6 +422,10 @@
 
   .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id58
 
+  * `2.64`_
+
+  .. _2.64: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id59
+
   * `2.70`_
 
   .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
@@ -428,7 +440,11 @@
 
   * `2.79`_
 
-  .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train 
+  .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train
+
+  * `2.86`_
+
+  .. _2.86: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id79
 
 * Volume
 
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index ff4fa09..a3bb645 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -105,16 +105,16 @@
 value.
 
 However, for the 3rd prong verification is a bit more subjective. The original
-intent of this prong was mostly for refstack/defcore and also for things that
+intent of this prong was mostly for interop/refstack and for things that are
 running on the stable branches. We don't want to remove any tests if that
 would break our API consistency checking between releases, or something that
-defcore/refstack is depending on being in Tempest. It's worth pointing out
-that if a test is used in `defcore`_ as part of `interop`_ testing then it will
-probably have continuing value being in Tempest as part of the
+interop/refstack is depending on being in Tempest. It's worth pointing out
+that if a test is used in `interop_wg`_ as part of `interop`_ testing then it
+will probably have continuing value being in Tempest as part of the
 integration/integrated tests in general. This is one area where some overlap
 is expected between testing in projects and Tempest, which is not a bad thing.
 
-.. _defcore: https://wiki.openstack.org/wiki/Governance/InteropWG
+.. _interop_wg: https://docs.opendev.org/openinfra/interop/latest/
 .. _interop: https://www.openstack.org/brand/interop
 
 Discussing the 3rd prong
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 0000000..c8f042d
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+  tasks:
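+    # NOTE: assumes the shared Zuul 'enable-fips' role, which switches the
+    # job node into FIPS mode before the Tempest run.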
+    - include_role:
+        name: enable-fips
diff --git a/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml b/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml
new file mode 100644
index 0000000..da58ba3
--- /dev/null
+++ b/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    Add "QoS bandwidth limit rules" APIs to:
+    "tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py"  module.
+
+    * List bandwidth limit rules for QoS policy
+    * Create bandwidth limit rule
+    * Show bandwidth limit rule details
+    * Update bandwidth limit rule
+    * Delete bandwidth limit rule
\ No newline at end of file
diff --git a/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml b/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml
new file mode 100644
index 0000000..b65b164
--- /dev/null
+++ b/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    Added QoS minimum packet rate rule client:
+
+    * create_minimum_packet_rate_rule
+    * update_minimum_packet_rate_rule
+    * show_minimum_packet_rate_rule
+    * list_minimum_packet_rate_rules
+    * delete_minimum_packet_rate_rule
diff --git a/releasenotes/notes/add-ssh-key-type-38d7a2f900d79842.yaml b/releasenotes/notes/add-ssh-key-type-38d7a2f900d79842.yaml
new file mode 100644
index 0000000..fef3004
--- /dev/null
+++ b/releasenotes/notes/add-ssh-key-type-38d7a2f900d79842.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add a parameter to specify the SSH key type. Current options are 'rsa'
+    (which is the default) and 'ecdsa'. Tempest now supports importing and
+    generating both 'rsa' and 'ecdsa' SSH key types.
diff --git a/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml b/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml
new file mode 100644
index 0000000..be2df6b
--- /dev/null
+++ b/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+  - |
+    Project tags have been supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    project tags testing
+    (``CONF.identity-feature-enabled.project_tags``)
+    is now enabled by default.
+deprecations:
+  - |
+    Project tags have been supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    project tags testing
+    (``CONF.identity-feature-enabled.project_tags``)
+    is now deprecated.
diff --git a/releasenotes/notes/deprecate-old-api-microversion-fixture-a471aac985c0f3fb.yaml b/releasenotes/notes/deprecate-old-api-microversion-fixture-a471aac985c0f3fb.yaml
new file mode 100644
index 0000000..652f7fa
--- /dev/null
+++ b/releasenotes/notes/deprecate-old-api-microversion-fixture-a471aac985c0f3fb.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+  - |
+    The old APIMicroversionFixture classes ``tempest.api.compute.api_microversion_fixture.APIMicroversionFixture``
+    and ``tempest.api.volume.api_microversion_fixture.APIMicroversionFixture``
+    have been deprecated for removal in favor of the new location
+    ``tempest.lib.common.api_microversion_fixture.APIMicroversionFixture``.
diff --git a/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml b/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml
new file mode 100644
index 0000000..3aaec69
--- /dev/null
+++ b/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add a new client to list, create, show information for,
+    update and delete neutron floating IPs port forwarding
+    resources.
diff --git a/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml b/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml
new file mode 100644
index 0000000..ac83eaf
--- /dev/null
+++ b/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    A client for neutron's Loggable resources API is now available in the
+    ``tempest/lib/services/network/loggable_resource_client.py`` module.
\ No newline at end of file
diff --git a/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml b/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml
new file mode 100644
index 0000000..0d964a9
--- /dev/null
+++ b/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    [`bug 1948935 <https://bugs.launchpad.net/tempest/+bug/1948935>`_]
+    The default value of the account-generator --concurrency parameter is now
+    set to 2 instead of 1.
diff --git a/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml b/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml
new file mode 100644
index 0000000..9e48510
--- /dev/null
+++ b/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Improve cleanup after Swift testing. Ensure containers are empty before
+    deleting them to prevent errors due to delayed execution.
diff --git a/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml b/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml
new file mode 100644
index 0000000..218d8ca
--- /dev/null
+++ b/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml
@@ -0,0 +1,18 @@
+---
+prelude: |
+    This release is to tag Tempest for OpenStack Xena release.
+    This release marks the start of Xena release support in Tempest.
+    After this release, Tempest will support the below OpenStack releases:
+
+    * Xena
+    * Wallaby
+    * Victoria
+    * Ussuri
+
+    Current development of Tempest is for the OpenStack Yoga development
+    cycle. Every Tempest commit is also tested against master during
+    the Yoga cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Yoga (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Xena release.
+
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index ed0a09f..6a1f8b4 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,9 @@
    :maxdepth: 1
 
    unreleased
+   v29.0.0
+   v28.1.0
+   v28.0.0
    v27.0.0
    v26.1.0
    v26.0.0
diff --git a/releasenotes/source/v28.0.0.rst b/releasenotes/source/v28.0.0.rst
new file mode 100644
index 0000000..19d4218
--- /dev/null
+++ b/releasenotes/source/v28.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v28.0.0 Release Notes
+=====================
+.. release-notes:: 28.0.0 Release Notes
+   :version: 28.0.0
diff --git a/releasenotes/source/v28.1.0.rst b/releasenotes/source/v28.1.0.rst
new file mode 100644
index 0000000..3cc3478
--- /dev/null
+++ b/releasenotes/source/v28.1.0.rst
@@ -0,0 +1,5 @@
+=====================
+v28.1.0 Release Notes
+=====================
+.. release-notes:: 28.1.0 Release Notes
+   :version: 28.1.0
diff --git a/releasenotes/source/v29.0.0.rst b/releasenotes/source/v29.0.0.rst
new file mode 100644
index 0000000..d367a59
--- /dev/null
+++ b/releasenotes/source/v29.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v29.0.0 Release Notes
+=====================
+.. release-notes:: 29.0.0 Release Notes
+   :version: 29.0.0
diff --git a/requirements.txt b/requirements.txt
index c71cabe..c4c7fcc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,7 @@
 jsonschema>=3.2.0 # MIT
 testtools>=2.2.0 # MIT
 paramiko>=2.7.0 # LGPLv2.1+
+cryptography>=2.1 # BSD/Apache-2.0
 netaddr>=0.7.18 # BSD
 oslo.concurrency>=3.26.0 # Apache-2.0
 oslo.config>=5.2.0 # Apache-2.0
@@ -20,4 +21,3 @@
 PrettyTable>=0.7.1 # BSD
 urllib3>=1.21.1 # MIT
 debtcollector>=1.2.0 # Apache-2.0
-unittest2>=1.1.0 # BSD
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 0c72b69..1919393 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -81,7 +81,7 @@
 .. zuul:rolevar:: stable_constraints_file
    :default: ''
 
-   Upper constraints file to be used for stable branch till stable/stein.
+   Upper constraints file to be used for stable branch till stable/train.
 
 .. zuul:rolevar:: tempest_tox_environment
    :default: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index a8b3ede..397de1e 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,11 +25,11 @@
     target_branch: "{{ zuul.override_checkout }}"
   when: zuul.override_checkout is defined
 
-- name: Use stable branch upper-constraints till stable/stein
+- name: Use stable branch upper-constraints till stable/train
   set_fact:
     # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
-  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train"]
 
 - name: Use Configured upper-constraints for non-master Tempest
   set_fact:
@@ -78,6 +78,17 @@
         exclude_list_option: "--exclude-list={{ tempest_test_exclude_list|quote }}"
       when: exclude_list_stat.stat.exists
 
+- name: stable/train workaround to fall back from exclude-list to blacklist
+  # NOTE(gmann): stable/train uses Tempest 26.1.0 with stestr 2.5.1
+  # (because stestr is capped at 2.5.1 by the stable/train upper constraints),
+  # which does not have the new exclude-list argument, so fall back to the
+  # old argument if the new one is passed.
+  set_fact:
+    exclude_list_option: "--blacklist-file={{ tempest_test_exclude_list|quote }}"
+  when:
+    - tempest_test_exclude_list is defined
+    - target_branch == "stable/train"
+
 # TODO(kopecmartin) remove this after all consumers of the role have switched
 # to tempest_exclude_regex option, until then it's kept here for the backward
 # compatibility
@@ -94,6 +105,19 @@
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
+    - target_branch != "stable/train"
+
+- name: stable/train workaround to fall back from exclude-regex to black-regex
+  # NOTE(gmann): stable/train uses Tempest 26.1.0 with stestr 2.5.1
+  # (because stestr is capped at 2.5.1 by the stable/train upper constraints),
+  # which does not have the new exclude-regex argument, so fall back to the
+  # old argument if the new one is passed.
+  set_fact:
+    tempest_test_exclude_regex: "--black-regex={{tempest_exclude_regex|quote}}"
+  when:
+    - tempest_black_regex is not defined
+    - tempest_exclude_regex is defined
+    - target_branch == "stable/train"
 
 - name: Run Tempest
   command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} \
diff --git a/setup.cfg b/setup.cfg
index d885db0..a41eccf 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,6 +18,7 @@
     Programming Language :: Python :: 3.6
     Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3.9
     Programming Language :: Python :: 3 :: Only
     Programming Language :: Python :: Implementation :: CPython
 
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index 4cc5fdd..f54fb22 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -119,3 +119,5 @@
         self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
         self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
                                                agents))
+        for agent in agents:
+            self.assertEqual(agent_xen['hypervisor'], agent['hypervisor'])
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 2716259..a6c6535 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -17,6 +17,7 @@
 
 from tempest.api.compute import base
 from tempest.common import tempest_fixtures as fixtures
+from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
@@ -237,6 +238,10 @@
                                          wait_until='ACTIVE')
         server_host = self.get_host_for_server(server['id'])
         self.assertEqual(host, server_host)
+        self.servers_client.delete_server(server['id'])
+        # NOTE(gmann): We need to wait for the server to be deleted before
+        # addCleanup removes the host from the aggregate.
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
 
 
 class AggregatesAdminTestV241(AggregatesAdminTestBase):
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 4c531b3..10018fe 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -127,3 +127,34 @@
             self.flavor['id'], 'hw:numa_nodes')
         self.assertEqual(body['hw:numa_nodes'], '1')
         self.assertNotIn('hw:cpu_policy', body)
+
+
+class FlavorMetadataValidation(base.BaseV2ComputeAdminTest):
+
+    min_microversion = '2.86'
+
+    @classmethod
+    def resource_setup(cls):
+        super(FlavorMetadataValidation, cls).resource_setup()
+        cls.flavor_name_prefix = 'test_flavor_validate_metadata_'
+        cls.ram = 512
+        cls.vcpus = 1
+        cls.disk = 10
+        cls.ephemeral = 10
+        cls.swap = 1024
+        cls.rxtx = 2
+
+    @decorators.idempotent_id('d3114f03-b0f2-4dc7-be11-70c0abc178b3')
+    def test_flavor_update_with_custom_namespace(self):
+        """Test flavor creation with a custom namespace, key and value"""
+        flavor_name = data_utils.rand_name(self.flavor_name_prefix)
+        flavor_id = self.create_flavor(ram=self.ram,
+                                       vcpus=self.vcpus,
+                                       disk=self.disk,
+                                       name=flavor_name)['id']
+        specs = {'hw:cpu_policy': 'shared', 'foo:bar': 'baz'}
+        body = self.admin_flavors_client.set_flavor_extra_spec(
+            flavor_id,
+            **specs)['extra_specs']
+        self.assertEqual(body['foo:bar'], 'baz')
+        self.assertEqual(body['hw:cpu_policy'], 'shared')
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 347193d..c7a1201 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -145,3 +145,26 @@
         hypers = self.client.search_hypervisor(
             hypers[0]['hypervisor_hostname'])['hypervisors']
         self.assertNotEmpty(hypers, "No hypervisors found.")
+
+
+class HypervisorAdminV253TestBase(base.BaseV2ComputeAdminTest):
+    """Tests Hypervisors API above 2.53 that require admin privileges"""
+
+    min_microversion = '2.53'
+
+    @classmethod
+    def setup_clients(cls):
+        super(HypervisorAdminV253TestBase, cls).setup_clients()
+        cls.client = cls.os_admin.hypervisor_client
+
+    @decorators.idempotent_id('4ab54a14-77a2-4e39-b9d2-1306d157c705')
+    def test_list_show_detail_hypervisors(self):
+        """Verify the list, list details, and show hypevisors
+
+        This verify the Hypervisor API response schema with v2.53 microversion
+        """
+        self.client.list_hypervisors(
+            detail=True, with_servers=True)['hypervisors']
+        hypers = self.client.list_hypervisors(with_servers=True)['hypervisors']
+        self.client.show_hypervisor(
+            hypers[0]['id'], with_servers=True)['hypervisor']
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index f8ad220..caf4fc1 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -268,3 +268,14 @@
             'default')['quota_class_set']
         self.assertThat(show_body.items(),
                         matchers.ContainsAll(body.items()))
+
+
+class QuotaClassesAdmin257Test(QuotaClassesAdminTestJSON):
+    """Test compute quotas with microversion greater than 2.56
+
+    # NOTE(gmann): This verifies the Quota class APIs response schema
+    # for the 2.57 microversion. No specific assert or behaviour verification
+    # is needed.
+    """
+
+    min_microversion = '2.57'
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index cf8c560..549d4fb 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -48,8 +48,8 @@
 
         :param return image_id: The UUID of the newly created image.
         """
-        image = self.image_client.show_image(CONF.compute.image_ref)
-        image_data = self.image_client.show_image_file(
+        image = self.admin_image_client.show_image(CONF.compute.image_ref)
+        image_data = self.admin_image_client.show_image_file(
             CONF.compute.image_ref).data
         image_file = io.BytesIO(image_data)
         create_dict = {
@@ -60,11 +60,11 @@
             'visibility': 'public',
         }
         create_dict.update(kwargs)
-        new_image = self.image_client.create_image(**create_dict)
-        self.addCleanup(self.image_client.wait_for_resource_deletion,
+        new_image = self.admin_image_client.create_image(**create_dict)
+        self.addCleanup(self.admin_image_client.wait_for_resource_deletion,
                         new_image['id'])
-        self.addCleanup(self.image_client.delete_image, new_image['id'])
-        self.image_client.store_image_file(new_image['id'], image_file)
+        self.addCleanup(self.admin_image_client.delete_image, new_image['id'])
+        self.admin_image_client.store_image_file(new_image['id'], image_file)
 
         return new_image['id']
 
diff --git a/tempest/api/compute/api_microversion_fixture.py b/tempest/api/compute/api_microversion_fixture.py
index 695af52..1f55a65 100644
--- a/tempest/api/compute/api_microversion_fixture.py
+++ b/tempest/api/compute/api_microversion_fixture.py
@@ -13,14 +13,23 @@
 # under the License.
 
 import fixtures
+from oslo_log import log as logging
 
 from tempest.lib.services.compute import base_compute_client
 
+LOG = logging.getLogger(__name__)
+
 
 class APIMicroversionFixture(fixtures.Fixture):
 
     def __init__(self, compute_microversion):
         self.compute_microversion = compute_microversion
+        new_fixture = (
+            'tempest.lib.common.api_microversion_fixture.'
+            'APIMicroversionFixture')
+        LOG.warning("%s class is deprecated and moved to %s. It"
+                    " will be removed in Z cycle.",
+                    self.__class__.__name__, new_fixture)
 
     def _setUp(self):
         super(APIMicroversionFixture, self)._setUp()
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 8d249ff..e16afaf 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -21,7 +21,6 @@
 from tempest.common import waiters
 from tempest import config
 from tempest import exceptions
-from tempest.lib.common import api_microversion_fixture
 from tempest.lib.common import api_version_request
 from tempest.lib.common import api_version_utils
 from tempest.lib.common.utils import data_utils
@@ -164,6 +163,11 @@
             api_version_utils.select_request_microversion(
                 cls.placement_min_microversion,
                 CONF.placement.min_microversion))
+        cls.setup_api_microversion_fixture(
+            compute_microversion=cls.request_microversion,
+            volume_microversion=cls.volume_request_microversion,
+            placement_microversion=cls.placement_request_microversion)
+
         cls.build_interval = CONF.compute.build_interval
         cls.build_timeout = CONF.compute.build_timeout
         cls.image_ref = CONF.compute.image_ref
@@ -302,10 +306,18 @@
     def create_test_server_group(cls, name="", policy=None):
         if not name:
             name = data_utils.rand_name(cls.__name__ + "-Server-Group")
-        if policy is None:
-            policy = ['affinity']
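+        # Nova microversion 2.64 replaced the 'policies' list with a single
+        # 'policy' string in the server group API, hence the branching below.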
+        if cls.is_requested_microversion_compatible('2.63'):
+            policy = policy or ['affinity']
+            if not isinstance(policy, list):
+                policy = [policy]
+            kwargs = {'policies': policy}
+        else:
+            policy = policy or 'affinity'
+            if isinstance(policy, list):
+                policy = policy[0]
+            kwargs = {'policy': policy}
         body = cls.server_groups_client.create_server_group(
-            name=name, policies=policy)['server_group']
+            name=name, **kwargs)['server_group']
         cls.addClassResourceCleanup(
             test_utils.call_and_ignore_notfound_exc,
             cls.server_groups_client.delete_server_group,
@@ -454,6 +466,12 @@
         server = self.servers_client.show_server(server_id)['server']
         self.assert_flavor_equal(new_flavor_id, server['flavor'])
 
+    def reboot_server(self, server_id, type):
+        """Reboot a server and wait for it to be ACTIVE."""
+        self.servers_client.reboot_server(server_id, type=type)
+        waiters.wait_for_server_status(
+            self.servers_client, server_id, 'ACTIVE')
+
     @classmethod
     def delete_volume(cls, volume_id):
         """Deletes the given volume and waits for it to be gone."""
@@ -477,28 +495,8 @@
         :param validation_resources: The dict of validation resources
             provisioned for the server.
         """
-        if CONF.validation.connect_method == 'floating':
-            if validation_resources:
-                return validation_resources['floating_ip']['ip']
-            else:
-                msg = ('When validation.connect_method equals floating, '
-                       'validation_resources cannot be None')
-                raise lib_exc.InvalidParam(invalid_param=msg)
-        elif CONF.validation.connect_method == 'fixed':
-            addresses = server['addresses'][CONF.validation.network_for_ssh]
-            for address in addresses:
-                if address['version'] == CONF.validation.ip_version_for_ssh:
-                    return address['addr']
-            raise exceptions.ServerUnreachable(server_id=server['id'])
-        else:
-            raise lib_exc.InvalidConfiguration()
-
-    def setUp(self):
-        super(BaseV2ComputeTest, self).setUp()
-        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
-            compute_microversion=self.request_microversion,
-            volume_microversion=self.volume_request_microversion,
-            placement_microversion=self.placement_request_microversion))
+        return compute.get_server_ip(
+            server, validation_resources=validation_resources)
 
     @classmethod
     def create_volume(cls, image_ref=None, **kwargs):
@@ -652,7 +650,7 @@
             cls.os_admin.availability_zone_client)
         cls.admin_flavors_client = cls.os_admin.flavors_client
         cls.admin_servers_client = cls.os_admin.servers_client
-        cls.image_client = cls.os_admin.image_client_v2
+        cls.admin_image_client = cls.os_admin.image_client_v2
         cls.admin_assisted_volume_snapshots_client = \
             cls.os_admin.assisted_volume_snapshots_client
 
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 671a779..a1f3514 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -109,9 +109,7 @@
                           sg['id'])
 
         # Reboot and add the other security group
-        self.servers_client.reboot_server(server_id, type='HARD')
-        waiters.wait_for_server_status(self.servers_client, server_id,
-                                       'ACTIVE')
+        self.reboot_server(server_id, type='HARD')
         self.servers_client.add_security_group(server_id, name=sg2['name'])
 
         # Check that we are not able to delete the other security
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index ac18442..efecd6c 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -68,7 +68,8 @@
             self.image_ssh_password,
             validation_resources['keypair']['private_key'],
             server=server,
-            servers_client=self.servers_client)
+            servers_client=self.servers_client,
+            ssh_key_type=CONF.validation.ssh_key_type)
         linux_client.validate_authentication()
 
     def _create_server_get_interfaces(self):
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 58d4d7d..d099fce 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -35,6 +35,8 @@
 
 class DeviceTaggingBase(base.BaseV2ComputeTest):
 
+    credentials = ['primary', 'admin']
+
     @classmethod
     def skip_checks(cls):
         super(DeviceTaggingBase, cls).skip_checks()
@@ -54,6 +56,7 @@
         cls.ports_client = cls.os_primary.ports_client
         cls.subnets_client = cls.os_primary.subnets_client
         cls.interfaces_client = cls.os_primary.interfaces_client
+        cls.servers_admin_client = cls.os_admin.servers_client
 
     @classmethod
     def setup_credentials(cls):
@@ -209,7 +212,7 @@
 
         server = self.create_test_server(
             validatable=True,
-            wait_until='ACTIVE',
+            wait_until='SSHABLE',
             validation_resources=validation_resources,
             config_drive=config_drive_enabled,
             name=data_utils.rand_name('device-tagging-server'),
@@ -332,7 +335,9 @@
     def verify_device_metadata(self, md_json):
         try:
             md_dict = json.loads(md_json)
-        except (json_decoder.JSONDecodeError, TypeError):
+        except (json_decoder.JSONDecodeError, TypeError) as e:
+            LOG.warning(
+                'Failed to decode json metadata: %s, %s', str(e), str(md_json))
             return False
 
         found_devices = [d['tags'][0] for d in md_dict['devices']
@@ -342,7 +347,9 @@
                 sorted(found_devices),
                 sorted(['nic-tag', 'volume-tag']))
             return True
-        except Exception:
+        except Exception as e:
+            LOG.warning(
+                'Failed to parse metadata: %s, %s', str(e), str(md_json))
             return False
 
     def verify_empty_devices(self, md_json):
@@ -422,11 +429,13 @@
         self.servers_client.detach_volume(server['id'], volume['id'])
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'available')
-        self.interfaces_client.delete_interface(server['id'],
-                                                interface['port_id'])
-        waiters.wait_for_interface_detach(self.interfaces_client,
+        req_id = self.interfaces_client.delete_interface(
+            server['id'], interface['port_id']
+        ).response['x-openstack-request-id']
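+        # NOTE: assumed waiter behaviour - wait_for_interface_detach checks
+        # the detach event in the server's instance actions, which needs the
+        # admin servers client and the request id of the detach call.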
+        waiters.wait_for_interface_detach(self.servers_admin_client,
                                           server['id'],
-                                          interface['port_id'])
+                                          interface['port_id'],
+                                          req_id)
         # FIXME(mriedem): The assertion that the tagged devices are removed
         # from the metadata for the server is being skipped until bug 1775947
         # is fixed.
diff --git a/tempest/api/compute/servers/test_instance_actions.py b/tempest/api/compute/servers/test_instance_actions.py
index 5ab592a..028da68 100644
--- a/tempest/api/compute/servers/test_instance_actions.py
+++ b/tempest/api/compute/servers/test_instance_actions.py
@@ -37,9 +37,7 @@
     @decorators.idempotent_id('77ca5cc5-9990-45e0-ab98-1de8fead201a')
     def test_list_instance_actions(self):
         """Test listing actions of the provided server"""
-        self.client.reboot_server(self.server['id'], type='HARD')
-        waiters.wait_for_server_status(self.client,
-                                       self.server['id'], 'ACTIVE')
+        self.reboot_server(self.server['id'], type='HARD')
 
         body = (self.client.list_instance_actions(self.server['id'])
                 ['instanceActions'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index deb21c7..c415c00 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -43,6 +43,17 @@
         super(ServerActionsTestJSON, self).setUp()
         # Check if the server is in a clean state after test
         try:
+            validation_resources = self.get_class_validation_resources(
+                self.os_primary)
+            # The _test_rebuild_server test compares the ip address attached
+            # to the server before and after the rebuild. To avoid a situation
+            # where a newly created server does not yet have a floating ip
+            # attached at the beginning of test_rebuild_server, make sure
+            # right here that the floating ip is attached.
+            waiters.wait_for_server_floating_ip(
+                self.client,
+                self.client.show_server(self.server_id)['server'],
+                validation_resources['floating_ip'])
             waiters.wait_for_server_status(self.client,
                                            self.server_id, 'ACTIVE')
         except lib_exc.NotFound:
@@ -136,8 +147,7 @@
             # in a server
             linux_client.exec_command("sync")
 
-        self.client.reboot_server(self.server_id, type=reboot_type)
-        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+        self.reboot_server(self.server_id, type=reboot_type)
 
         if CONF.validation.run_validation:
             # Log in and verify the boot time has changed
@@ -607,8 +617,7 @@
         # log file is truncated and we cannot get any console log through
         # "console-log" API.
         # The detail is https://bugs.launchpad.net/nova/+bug/1251920
-        self.client.reboot_server(self.server_id, type='HARD')
-        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+        self.reboot_server(self.server_id, type='HARD')
         self.wait_for(self._get_output)
 
     @decorators.idempotent_id('89104062-69d8-4b19-a71b-f47b7af093d7')
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 4c0d021..4811a7b 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -44,9 +44,21 @@
         cls.client = cls.server_groups_client
 
     @classmethod
+    def _set_policy(cls, policy):
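+        # Convert the policy list to a single string for microversions where
+        # the server group API takes 'policy' (2.64+) instead of 'policies'.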
+        if not cls.is_requested_microversion_compatible('2.63'):
+            return policy[0]
+        else:
+            return policy
+
+    @classmethod
     def resource_setup(cls):
         super(ServerGroupTestJSON, cls).resource_setup()
-        cls.policy = ['affinity']
+        if cls.is_requested_microversion_compatible('2.63'):
+            cls.policy_field = 'policies'
+            cls.policy = ['affinity']
+        else:
+            cls.policy_field = 'policy'
+            cls.policy = 'affinity'
 
     def setUp(self):
         super(ServerGroupTestJSON, self).setUp()
@@ -61,9 +73,9 @@
 
     def _create_server_group(self, name, policy):
         # create the test server-group with given policy
-        server_group = {'name': name, 'policies': policy}
+        server_group = {'name': name, self.policy_field: policy}
         body = self.create_test_server_group(name, policy)
-        for key in ['name', 'policies']:
+        for key in ['name', self.policy_field]:
             self.assertEqual(server_group[key], body[key])
         return body
 
@@ -88,7 +100,7 @@
     @decorators.idempotent_id('3645a102-372f-4140-afad-13698d850d23')
     def test_create_delete_server_group_with_anti_affinity_policy(self):
         """Test Create/Delete the server-group with anti-affinity policy"""
-        policy = ['anti-affinity']
+        policy = self._set_policy(['anti-affinity'])
         self._create_delete_server_group(policy)
 
     @decorators.idempotent_id('154dc5a4-a2fe-44b5-b99e-f15806a4a113')
@@ -99,7 +111,7 @@
         for _ in range(0, 2):
             server_groups.append(self._create_server_group(server_group_name,
                                                            self.policy))
-        for key in ['name', 'policies']:
+        for key in ['name', self.policy_field]:
             self.assertEqual(server_groups[0][key], server_groups[1][key])
         self.assertNotEqual(server_groups[0]['id'], server_groups[1]['id'])
 
@@ -134,3 +146,24 @@
         server_group = (self.server_groups_client.show_server_group(
             self.created_server_group['id'])['server_group'])
         self.assertIn(server['id'], server_group['members'])
+
+
+class ServerGroup264TestJSON(base.BaseV2ComputeTest):
+    """These tests check for the server-group APIs 2.64 microversion.
+
+    This tests is only to verify the POST, GET server-groups APIs response
+    schema with 2.64 microversion
+    """
+    create_default_network = True
+    min_microversion = '2.64'
+
+    @decorators.idempotent_id('b52f09dd-2133-4037-9a5d-bdb260096a88')
+    def test_create_get_server_group(self):
+        # create, get the test server-group with given policy
+        server_group = self.create_test_server_group(
+            name='server-group', policy='affinity')
+        self.addCleanup(
+            self.server_groups_client.delete_server_group,
+            server_group['id'])
+        self.server_groups_client.list_server_groups()
+        self.server_groups_client.show_server_group(server_group['id'])
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 7251e36..e4ec209 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -49,7 +49,7 @@
         server = self.create_test_server(
             validatable=True,
             validation_resources=validation_resources,
-            wait_until='ACTIVE',
+            wait_until='SSHABLE',
             adminPass=self.image_ssh_password)
         self.addCleanup(self.delete_server, server['id'])
         # Record addresses so that we can ssh later
@@ -455,6 +455,8 @@
 
     @utils.services('image')
     @decorators.idempotent_id('885ac48a-2d7a-40c5-ae8b-1993882d724c')
+    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
+                          'Snapshotting is not available.')
     def test_snapshot_volume_backed_multiattach(self):
         """Boots a server from a multiattach volume and snapshots the server.
 
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 516f599..43b4bf5 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.api.compute import base
+from tempest.api.compute.volumes import test_attach_volume
 from tempest import config
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
@@ -20,24 +20,15 @@
 CONF = config.CONF
 
 
-class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
+class AttachVolumeNegativeTest(test_attach_volume.BaseAttachVolumeTest):
     """Negative tests of volume attaching"""
 
-    create_default_network = True
-
-    @classmethod
-    def skip_checks(cls):
-        super(AttachVolumeNegativeTest, cls).skip_checks()
-        if not CONF.service_available.cinder:
-            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
-            raise cls.skipException(skip_msg)
-
     @decorators.attr(type=['negative'])
     @decorators.related_bug('1630783', status_code=500)
     @decorators.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
     def test_delete_attached_volume(self):
         """Test deleting attachemd volume should fail"""
-        server = self.create_test_server(wait_until='ACTIVE')
+        server, validation_resources = self._create_server()
         volume = self.create_volume()
         self.attach_volume(server, volume)
 
@@ -54,7 +45,7 @@
         depending on whether or not cinder v3.27 is being used to attach
         the volume to the instance.
         """
-        server = self.create_test_server(wait_until='ACTIVE')
+        server, validation_resources = self._create_server()
         volume = self.create_volume()
 
         self.attach_volume(server, volume)
@@ -66,12 +57,12 @@
     @decorators.idempotent_id('ee37a796-2afb-11e7-bc0f-fa163e65f5ce')
     def test_attach_attached_volume_to_different_server(self):
         """Test attaching attached volume to different server should fail"""
-        server1 = self.create_test_server(wait_until='ACTIVE')
+        server1, validation_resources = self._create_server()
         volume = self.create_volume()
 
         self.attach_volume(server1, volume)
 
         # Create server2 and attach in-use volume
-        server2 = self.create_test_server(wait_until='ACTIVE')
+        server2, validation_resources = self._create_server()
         self.assertRaises(lib_exc.BadRequest,
                           self.attach_volume, server2, volume)
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 32ccb9e..419c6c7 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -157,4 +157,4 @@
         self.addCleanup(self.delete_domain, domain['id'])
         expected_data = {'name': d_name, 'enabled': True}
         self.assertEqual('', domain['description'])
-        self.assertDictContainsSubset(expected_data, domain)
+        self.assertLessEqual(expected_data.items(), domain.items())
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index a649d27..fb3b03e 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -44,7 +44,7 @@
 
         # Verifying response body of create service
         expected_data = {'name': name, 'type': serv_type, 'description': desc}
-        self.assertDictContainsSubset(expected_data, create_service)
+        self.assertLessEqual(expected_data.items(), create_service.items())
 
         # Update description
         s_id = create_service['id']
@@ -61,7 +61,7 @@
         resp3_desc = fetched_service['description']
 
         self.assertEqual(resp2_desc, resp3_desc)
-        self.assertDictContainsSubset(update_service, fetched_service)
+        self.assertLessEqual(update_service.items(), fetched_service.items())
 
     @decorators.idempotent_id('d1dcb1a1-2b6b-4da8-bbb8-5532ef6e8269')
     def test_create_service_without_description(self):
@@ -72,7 +72,7 @@
             type=serv_type, name=name)['service']
         self.addCleanup(self.services_client.delete_service, service['id'])
         expected_data = {'name': name, 'type': serv_type}
-        self.assertDictContainsSubset(expected_data, service)
+        self.assertLessEqual(expected_data.items(), service.items())
 
     @decorators.idempotent_id('e55908e8-360e-439e-8719-c3230a3e179e')
     def test_list_services(self):
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 5bbd65c..e191979 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -38,14 +38,17 @@
 
         # Create a user.
         user_password = data_utils.rand_password()
-        user = self.create_test_user(password=user_password)
+        user = self.create_test_user(password=user_password,
+                                     domain_id=CONF.identity.default_domain_id)
 
         # Create a couple projects
         project1_name = data_utils.rand_name(name=self.__class__.__name__)
-        project1 = self.setup_test_project(name=project1_name)
+        project1 = self.setup_test_project(
+            name=project1_name, domain_id=CONF.identity.default_domain_id)
 
         project2_name = data_utils.rand_name(name=self.__class__.__name__)
-        project2 = self.setup_test_project(name=project2_name)
+        project2 = self.setup_test_project(
+            name=project2_name, domain_id=CONF.identity.default_domain_id)
         self.addCleanup(self.projects_client.delete_project, project2['id'])
 
         # Create a role
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index 6425ea9..dc6dd4a 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -77,6 +77,8 @@
         time.sleep(1)
         self.non_admin_users_client.auth_provider.set_auth()
 
+    @testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
+                          'Security compliance not available.')
     @decorators.idempotent_id('ad71bd23-12ad-426b-bb8b-195d2b635f27')
     @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
                       'Skipped because environment has an '
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 614dfcf..1ce9f47 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -53,7 +53,8 @@
 
     def tearDown(self):
         super(QuotasNegativeTest, self).tearDown()
-        self.credentials_provider.cleanup_default_secgroup(self.project['id'])
+        self.credentials_provider.cleanup_default_secgroup(
+            self.os_admin.security_groups_client, self.project['id'])
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 260ba74..696d68d 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -76,6 +76,8 @@
         cls.subnetpools_client = cls.os_primary.subnetpools_client
         cls.subnets_client = cls.os_primary.subnets_client
         cls.ports_client = cls.os_primary.ports_client
+        cls.floating_ips_port_forwarding_client =\
+            cls.os_primary.floating_ips_port_forwarding_client
         cls.quotas_client = cls.os_primary.network_quotas_client
         cls.floating_ips_client = cls.os_primary.floating_ips_client
         cls.security_groups_client = cls.os_primary.security_groups_client
@@ -85,6 +87,7 @@
         cls.service_providers_client = cls.os_primary.service_providers_client
         cls.tags_client = cls.os_primary.tags_client
         cls.log_resource_client = cls.os_primary.log_resource_client
+        cls.loggable_resource_client = cls.os_primary.loggable_resource_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 63078cd..190f7e0 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -51,7 +51,8 @@
 
     def _create_subnet(self, network, gateway='',
                        cidr=None, mask_bits=None, **kwargs):
-        subnet = self.create_subnet(network, gateway, cidr, mask_bits)
+        subnet = self.create_subnet(
+            network, gateway, cidr, mask_bits, **kwargs)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.subnets_client.delete_subnet, subnet['id'])
         return subnet
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index d75acfc..532ef65 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -110,6 +110,33 @@
         # Delete security group
         self._delete_security_group(group_create_body['security_group']['id'])
 
+    @decorators.idempotent_id('fd1ea1c5-eedc-403f-898d-2b562e853f2e')
+    def test_delete_security_group_clear_associated_rules(self):
+        """Verify delete security group.
+
+        its associated security group rules are also deleted
+        """
+        group_create_body, _ = self._create_security_group()
+
+        # Create rules for tcp protocol
+        client = self.security_group_rules_client
+        rule_create_body = client.create_security_group_rule(
+            security_group_id=group_create_body['security_group']['id'],
+            protocol='tcp',
+            direction='ingress',
+            ethertype=self.ethertype
+        )
+        rule_id = rule_create_body['security_group_rule']['id']
+        # Delete security group
+        self._delete_security_group(group_create_body['security_group']['id'])
+
+        # List rules and verify created rule is not in response
+        rule_list_body = (
+            self.security_group_rules_client.list_security_group_rules())
+        rule_list = [rule['id']
+                     for rule in rule_list_body['security_group_rules']]
+        self.assertNotIn(rule_id, rule_list)
+
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('cfb99e0e-7410-4a3d-8a0c-959a63ee77e9')
     def test_create_show_delete_security_group_rule(self):
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 478a834..8d8039b 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
 from tempest.common import custom_matchers
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -27,11 +29,6 @@
 
     The containers should be visible from the container_client given.
     Will not throw any error if the containers don't exist.
-    Will not check that object and container deletions succeed.
-    After delete all the objects from a container, it will wait 2
-    seconds before delete the container itself, in order to deployments
-    using HA proxy sync the deletion properly, otherwise, the container
-    might fail to be deleted because it's not empty.
 
     :param containers: List of containers(or string of a container)
                        to be deleted
@@ -119,12 +116,20 @@
             object_name = data_utils.rand_name(name='TestObject')
         if data is None:
             data = data_utils.random_bytes()
-        cls.object_client.create_object(container_name,
-                                        object_name,
-                                        data,
-                                        metadata=metadata)
 
-        return object_name, data
+        err = Exception()
+        for _ in range(5):
+            try:
+                cls.object_client.create_object(container_name,
+                                                object_name,
+                                                data,
+                                                metadata=metadata)
+                return object_name, data
+            # after container creation we might see Conflict
+            except lib_exc.Conflict as e:
+                err = e
+                time.sleep(2)
+        raise err
 
     @classmethod
     def delete_containers(cls, container_client=None, object_client=None):
diff --git a/tempest/api/object_storage/test_container_services_negative.py b/tempest/api/object_storage/test_container_services_negative.py
index 31c33db..51c711f 100644
--- a/tempest/api/object_storage/test_container_services_negative.py
+++ b/tempest/api/object_storage/test_container_services_negative.py
@@ -36,6 +36,11 @@
             body = cls.capabilities_client.list_capabilities()
             cls.constraints = body['swift']
 
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ContainerNegativeTest, cls).resource_cleanup()
+
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('30686921-4bed-4764-a038-40d741ed4e78')
     @testtools.skipUnless(
@@ -167,11 +172,7 @@
         # create a container and an object within it
         # attempt to delete a container that isn't empty.
         container_name = self.create_container()
-        self.addCleanup(self.container_client.delete_container,
-                        container_name)
         object_name, _ = self.create_object(container_name)
-        self.addCleanup(self.object_client.delete_object,
-                        container_name, object_name)
 
         ex = self.assertRaises(exceptions.Conflict,
                                self.container_client.delete_container,
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 276b826..6b1f849 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -58,6 +58,7 @@
 
         # Default container-server config only allows localhost
         cls.local_ip = '127.0.0.1'
+        cls.local_ip_v6 = '[::1]'
 
         # Must be configure according to container-sync interval
         container_sync_timeout = CONF.object_storage.container_sync_timeout
@@ -134,11 +135,18 @@
         """Test container synchronization"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
-            client_proxy_ip = \
-                urlparse.urlparse(cont_client.base_url).netloc.split(':')[0]
-            client_base_url = \
-                cont_client.base_url.replace(client_proxy_ip,
-                                             self.local_ip)
+            # use rsplit with a maxsplit of 1 to ensure ipv6 addresses are
+            # handled properly as well
+            client_proxy_ip = urlparse.urlparse(
+                cont_client.base_url).netloc.rsplit(':', 1)[0]
+            if client_proxy_ip.startswith("["):  # lazy check
+                client_base_url = \
+                    cont_client.base_url.replace(client_proxy_ip,
+                                                 self.local_ip_v6)
+            else:
+                client_base_url = \
+                    cont_client.base_url.replace(client_proxy_ip,
+                                                 self.local_ip)
             headers = {'X-Container-Sync-Key': 'sync_key',
                        'X-Container-Sync-To': "%s/%s" %
                        (client_base_url, str(cont))}
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 93f6fdb..2823185 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -182,7 +182,7 @@
         self.assertEqual(data, body)
 
     @decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
-    @decorators.skip_because(bug='1905432')
+    @decorators.unstable_test(bug='1905432')
     def test_create_object_with_transfer_encoding(self):
         """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index 0c84357..22d12ce 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -18,7 +18,6 @@
 from tempest.api.object_storage import base
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
 # Each segment, except for the final one, must be at least 1 megabyte
@@ -34,11 +33,7 @@
         self.objects = []
 
     def tearDown(self):
-        for obj in self.objects:
-            test_utils.call_and_ignore_notfound_exc(
-                self.object_client.delete_object,
-                self.container_name, obj)
-        self.container_client.delete_container(self.container_name)
+        self.delete_containers()
         super(ObjectSloTest, self).tearDown()
 
     def _create_object(self, container_name, object_name, data, params=None):
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index 659e2c4..73903cf 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -64,8 +64,8 @@
 class GroupSnapshotsTest(BaseGroupSnapshotsTest):
     """Test group snapshot"""
 
-    min_microversion = '3.14'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.14'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
     def test_group_snapshot_create_show_list_delete(self):
@@ -252,11 +252,10 @@
 class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
     """Test group snapshot with volume microversion greater than 3.18"""
 
-    min_microversion = '3.19'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.19'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
-    @decorators.skip_because(bug='1770179')
     def test_reset_group_snapshot_status(self):
         """Test resetting group snapshot status to creating/available/error"""
         # Create volume type
diff --git a/tempest/api/volume/admin/test_group_type_specs.py b/tempest/api/volume/admin/test_group_type_specs.py
index 5c5913e..181926e 100644
--- a/tempest/api/volume/admin/test_group_type_specs.py
+++ b/tempest/api/volume/admin/test_group_type_specs.py
@@ -21,8 +21,8 @@
 class GroupTypeSpecsTest(base.BaseVolumeAdminTest):
     """Test group type specs"""
 
-    min_microversion = '3.11'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.11'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('bb4e30d0-de6e-4f4d-866c-dcc48d023b4e')
     def test_group_type_specs_create_show_update_list_delete(self):
@@ -73,10 +73,11 @@
         self.assertEqual(list_specs, body)
 
         # Delete specified item of group type specs
-        delete_key = 'key1'
-        self.admin_group_types_client.delete_group_type_specs_item(
-            group_type['id'], delete_key)
-        self.assertRaises(
-            lib_exc.NotFound,
-            self.admin_group_types_client.show_group_type_specs_item,
-            group_type['id'], delete_key)
+        delete_keys = ['key1', 'key2', 'key3']
+        for it in delete_keys:
+            self.admin_group_types_client.delete_group_type_specs_item(
+                group_type['id'], it)
+            self.assertRaises(
+                lib_exc.NotFound,
+                self.admin_group_types_client.show_group_type_specs_item,
+                group_type['id'], it)
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index a7a5d6f..8154682 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -21,12 +21,12 @@
 class GroupTypesTest(base.BaseVolumeAdminTest):
     """Test group types"""
 
-    min_microversion = '3.11'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.11'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
-    def test_group_type_create_list_update_show(self):
-        """Test create/list/update/show group type"""
+    def test_group_type_create_list_update_show_delete(self):
+        """Test create/list/update/show/delete group type"""
         name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
         description = data_utils.rand_name("group-type-description")
         group_specs = {"consistent_group_snapshot_enabled": "<is> False"}
@@ -34,7 +34,8 @@
                   'description': description,
                   'group_specs': group_specs,
                   'is_public': True}
-        body = self.create_group_type(**params)
+        body = self.admin_group_types_client.create_group_type(
+            **params)['group_type']
         self.assertIn('name', body)
         err_msg = ("The created group_type %(var)s is not equal to the "
                    "requested %(var)s")
@@ -64,3 +65,9 @@
             self.assertEqual(params[key], fetched_group_type[key],
                              '%s of the fetched group_type is different '
                              'from the created group_type' % key)
+
+        self.admin_group_types_client.delete_group_type(body['id'])
+        group_list = (
+            self.admin_group_types_client.list_group_types()['group_types'])
+        group_ids = [it['id'] for it in group_list]
+        self.assertNotIn(body['id'], group_ids)
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index 747a194..f16e4d2 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -25,8 +25,8 @@
 class GroupsTest(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.12"""
 
-    min_microversion = '3.13'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.13'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('4b111d28-b73d-4908-9bd2-03dc2992e4d4')
     def test_group_create_show_list_delete(self):
@@ -155,8 +155,8 @@
 class GroupsV314Test(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.13"""
 
-    min_microversion = '3.14'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.14'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
     def test_create_group_from_group(self):
@@ -192,8 +192,8 @@
 class GroupsV320Test(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.19"""
 
-    min_microversion = '3.20'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.20'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('b20c696b-0cbc-49a5-8b3a-b1fb9338f45c')
     def test_reset_group_status(self):
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
index 768c129..00b7f3a 100644
--- a/tempest/api/volume/admin/test_user_messages.py
+++ b/tempest/api/volume/admin/test_user_messages.py
@@ -24,8 +24,8 @@
 class UserMessagesTest(base.BaseVolumeAdminTest):
     """Test volume messages with microversion greater than 3.2"""
 
-    min_microversion = '3.3'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.3'
+    volume_max_microversion = 'latest'
 
     def _create_user_message(self):
         """Trigger a 'no valid host' situation to generate a message."""
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 5ab8e87..6b58189 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -87,7 +87,7 @@
         # test that the specific values we set are actually in
         # the final result. There is nothing here that ensures there
         # would be no other values in there.
-        self.assertDictContainsSubset(new_quota_set, quota_set)
+        self.assertLessEqual(new_quota_set.items(), quota_set.items())
 
     @decorators.idempotent_id('18c51ae9-cb03-48fc-b234-14a19374dbed')
     def test_show_quota_usage(self):
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 5c14d52..4a3f494 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -13,6 +13,7 @@
 import abc
 
 from oslo_log import log as logging
+import testtools
 
 from tempest.api.volume import base
 from tempest.common import waiters
@@ -146,6 +147,8 @@
         self._retype_volume(src_vol, migration_policy='on-demand')
 
     @decorators.idempotent_id('d0d9554f-e7a5-4104-8973-f35b27ccb60d')
+    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+                          "Cinder volume snapshots are disabled.")
     def test_volume_from_snapshot_retype_with_migration(self):
         """Test volume created from snapshot retype with migration
 
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index 6b2a278..70a62ff 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -77,6 +77,21 @@
             extra_spec)
 
     @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('474090d2-0824-eb3b-9335-f506b4aa49d8')
+    def test_update_nonexistent_type_id(self):
+        """Test update volume type extra specs for non existent volume type
+
+        Update volume type extra specs for non existent volume type should
+        fail.
+        """
+        spec_key = "spec1"
+        extra_spec = {spec_key: "val5"}
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.admin_volume_types_client.update_volume_type_extra_specs,
+            data_utils.rand_uuid(), spec_key, extra_spec)
+
+    @decorators.attr(type=['negative'])
     @decorators.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
     def test_create_nonexistent_type_id(self):
         """Test creating volume type extra specs for non existent volume type
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index 174cf9e..f37c427 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -54,3 +54,28 @@
         volume_type = self.create_volume_type(**params)
         self.assertRaises(lib_exc.NotFound,
                           self.create_volume, volume_type=volume_type['id'])
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('a5924b5f-b6c1-49ba-994c-b4af55d26e52')
+    def test_create_volume_type_encryption_nonexistent_type_id(self):
+        """Test create encryption with nonexistent type id will fail"""
+        create_kwargs = {
+            'type_id': data_utils.rand_uuid(),
+            'provider': 'LuksEncryptor',
+            'key_size': 256,
+            'cipher': 'aes-xts-plain64',
+            'control_location': 'front-end'
+            }
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.create_encryption_type, **create_kwargs)
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('969b10c7-3d77-4e1b-a4f2-2d265980f7e5')
+    def test_create_with_repeated_name(self):
+        """Test creating volume type with a repeated name will fail"""
+        volume_type_name = self.create_volume_type()['name']
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.admin_volume_types_client.create_volume_type,
+            name=volume_type_name)
diff --git a/tempest/api/volume/api_microversion_fixture.py b/tempest/api/volume/api_microversion_fixture.py
index 7bbe674..219fde8 100644
--- a/tempest/api/volume/api_microversion_fixture.py
+++ b/tempest/api/volume/api_microversion_fixture.py
@@ -12,14 +12,23 @@
 # under the License.
 
 import fixtures
+from oslo_log import log as logging
 
 from tempest.lib.services.volume import base_client
 
+LOG = logging.getLogger(__name__)
+
 
 class APIMicroversionFixture(fixtures.Fixture):
 
     def __init__(self, volume_microversion):
         self.volume_microversion = volume_microversion
+        new_fixture = (
+            'tempest.lib.common.api_microversion_fixture.'
+            'APIMicroversionFixture')
+        LOG.warning("%s class is deprecated and moved to %s. It"
+                    " will be removed in Z cycle.",
+                    self.__class__.__name__, new_fixture)
 
     def _setUp(self):
         super(APIMicroversionFixture, self)._setUp()
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index d5c6fd9..b90b5bb 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.api.volume import api_microversion_fixture
 from tempest.common import compute
 from tempest.common import waiters
 from tempest import config
@@ -43,7 +42,7 @@
             raise cls.skipException(skip_msg)
 
         api_version_utils.check_skip_with_microversion(
-            cls.min_microversion, cls.max_microversion,
+            cls.volume_min_microversion, cls.volume_max_microversion,
             CONF.volume.min_microversion, CONF.volume.max_microversion)
 
     @classmethod
@@ -75,18 +74,20 @@
             cls.os_primary.volume_availability_zone_client_latest)
         cls.volume_limits_client = cls.os_primary.volume_limits_client_latest
 
-    def setUp(self):
-        super(BaseVolumeTest, self).setUp()
-        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
-            self.request_microversion))
-
     @classmethod
     def resource_setup(cls):
         super(BaseVolumeTest, cls).resource_setup()
-        cls.request_microversion = (
+        cls.volume_request_microversion = (
+            api_version_utils.select_request_microversion(
+                cls.volume_min_microversion,
+                CONF.volume.min_microversion))
+        cls.compute_request_microversion = (
             api_version_utils.select_request_microversion(
                 cls.min_microversion,
-                CONF.volume.min_microversion))
+                CONF.compute.min_microversion))
+        cls.setup_api_microversion_fixture(
+            compute_microversion=cls.compute_request_microversion,
+            volume_microversion=cls.volume_request_microversion)
 
         cls.image_ref = CONF.compute.image_ref
         cls.flavor_ref = CONF.compute.flavor_ref
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 9600aa9..f1dec06 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -109,8 +109,8 @@
 class VolumesTransfersV355Test(VolumesTransfersTest):
     """Test volume transfer for the "new" Transfers API mv 3.55"""
 
-    min_microversion = '3.55'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.55'
+    volume_max_microversion = 'latest'
 
     credentials = ['primary', 'alt', 'admin']
 
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index fff6a44..138d120 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -164,8 +164,8 @@
 class VolumesBackupsV39Test(base.BaseVolumeTest):
     """Test volumes backup with volume microversion greater than 3.8"""
 
-    min_microversion = '3.9'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.9'
+    volume_max_microversion = 'latest'
 
     @classmethod
     def skip_checks(cls):
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index d9790f3..fcbc982 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -80,7 +80,7 @@
     # NOTE(mriedem): The minimum required volume API version is 3.42 and the
     # minimum required compute API microversion is 2.51, but the compute call
     # is implicit - Cinder calls Nova at that microversion, Tempest does not.
-    min_microversion = '3.42'
+    volume_min_microversion = '3.42'
 
     def _find_extend_volume_instance_action(self, server_id):
         actions = self.servers_client.list_instance_actions(
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 28e41bf..2009970 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -143,8 +143,8 @@
 class VolumesSummaryTest(base.BaseVolumeTest):
     """Test volume summary"""
 
-    min_microversion = '3.12'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.12'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('c4f2431e-4920-4736-9e00-4040386b6feb')
     def test_show_volume_summary(self):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 554fc6a..d9b8430 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -336,6 +336,9 @@
 
         # Deactivate the image
         self.images_client.deactivate_image(image['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.images_client.reactivate_image, image['id'])
+
         body = self.images_client.show_image(image['id'])
         self.assertEqual("deactivated", body['status'])
         # Try creating a volume from deactivated image
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index fd2e7c4..a58da7e 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -167,7 +167,8 @@
     @decorators.idempotent_id('677863d1-3142-456d-b6ac-9924f667a7f4')
     def test_volume_from_snapshot(self):
         """Test creating volume from snapshot with extending size"""
-        self._create_volume_from_snapshot(extra_size=1)
+        self._create_volume_from_snapshot(
+            extra_size=CONF.volume.volume_size_extend)
 
     @decorators.idempotent_id('053d8870-8282-4fff-9dbb-99cb58bb5e0a')
     def test_volume_from_snapshot_no_size(self):
diff --git a/tempest/clients.py b/tempest/clients.py
index 1b05b54..4c3d875 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -59,6 +59,8 @@
         self.ports_client = self.network.PortsClient()
         self.network_quotas_client = self.network.QuotasClient()
         self.floating_ips_client = self.network.FloatingIPsClient()
+        self.floating_ips_port_forwarding_client =\
+            self.network.FloatingIpsPortForwardingClient()
         self.metering_labels_client = self.network.MeteringLabelsClient()
         self.metering_label_rules_client = (
             self.network.MeteringLabelRulesClient())
@@ -71,9 +73,13 @@
         self.tags_client = self.network.TagsClient()
         self.qos_client = self.network.QosClient()
         self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
+        self.qos_limit_bw_client = self.network.QosLimitBandwidthRulesClient()
+        self.qos_min_pps_client = (
+            self.network.QosMinimumPacketRateRulesClient())
         self.segments_client = self.network.SegmentsClient()
         self.trunks_client = self.network.TrunksClient()
         self.log_resource_client = self.network.LogResourceClient()
+        self.loggable_resource_client = self.network.LoggableResourceClient()
 
     def _set_image_clients(self):
         if CONF.service_available.glance:
@@ -112,7 +118,8 @@
         self.server_groups_client = self.compute.ServerGroupsClient()
         self.limits_client = self.compute.LimitsClient()
         self.compute_images_client = self.compute.ImagesClient()
-        self.keypairs_client = self.compute.KeyPairsClient()
+        self.keypairs_client = self.compute.KeyPairsClient(
+            ssh_key_type=CONF.validation.ssh_key_type)
         self.quotas_client = self.compute.QuotasClient()
         self.quota_classes_client = self.compute.QuotaClassesClient()
         self.flavors_client = self.compute.FlavorsClient()
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 917262e..ad0b547 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -81,11 +81,11 @@
   will have the prefix with the given TAG in its name. Using tag is recommended
   for the further using, cleaning resources.
 
-* ``-r, --concurrency CONCURRENCY`` (Optional) Concurrency count
-  (default: 1). The number of accounts required can be estimated as
-  CONCURRENCY x 2. Each user provided in *accounts.yaml* file will be in
-  a different tenant. This is required to provide isolation between test for
-  running in parallel.
+* ``-r, --concurrency CONCURRENCY`` (Optional) Concurrency count (default: 2).
+  The number of accounts generated will be the same as CONCURRENCY. The
+  higher the number, the more tests will run in parallel. If you want to run
+  tests sequentially then use 1 as the value for concurrency (beware that
+  tests that need more credentials will fail).
 
 * ``--with-admin`` (Optional) Creates admin for each concurrent group
   (default: False).
@@ -236,7 +236,7 @@
                         dest='tag',
                         help='Resources tag')
     parser.add_argument('-r', '--concurrency',
-                        default=1,
+                        default=2,
                         type=positive_int,
                         required=False,
                         dest='concurrency',
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 0db1ab1..421afd3 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -130,7 +130,7 @@
             msg = ('Glance is available in the catalog, but no known version, '
                    '(v1.x or v2.x) of Glance could be found, so Glance should '
                    'be configured as not available')
-            LOG.warn(msg)
+            LOG.warning(msg)
             print_and_or_update('glance', 'service-available', False, update)
             return
 
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index a062f6f..43e30ad 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -23,11 +23,14 @@
 from oslo_log import log as logging
 from oslo_utils import excutils
 
+from tempest.common.utils.linux import remote_client
 from tempest.common import waiters
 from tempest import config
+from tempest import exceptions
 from tempest.lib.common import fixed_network
 from tempest.lib.common import rest_client
 from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
 
 CONF = config.CONF
 
@@ -54,10 +57,37 @@
     return False
 
 
+def get_server_ip(server, validation_resources=None):
+    """Get the server fixed or floating IP.
+
+    Based on the configuration we're in, return a correct IP
+    address for validating that a guest is up.
+
+    :param server: The server dict as returned by the API
+    :param validation_resources: The dict of validation resources
+        provisioned for the server.
+    """
+    if CONF.validation.connect_method == 'floating':
+        if validation_resources:
+            return validation_resources['floating_ip']['ip']
+        else:
+            msg = ('When validation.connect_method equals floating, '
+                   'validation_resources cannot be None')
+            raise lib_exc.InvalidParam(invalid_param=msg)
+    elif CONF.validation.connect_method == 'fixed':
+        addresses = server['addresses'][CONF.validation.network_for_ssh]
+        for address in addresses:
+            if address['version'] == CONF.validation.ip_version_for_ssh:
+                return address['addr']
+        raise exceptions.ServerUnreachable(server_id=server['id'])
+    else:
+        raise lib_exc.InvalidConfiguration()
+
+
 def create_test_server(clients, validatable=False, validation_resources=None,
                        tenant_network=None, wait_until=None,
                        volume_backed=False, name=None, flavor=None,
-                       image_id=None, wait_for_sshable=True, **kwargs):
+                       image_id=None, **kwargs):
     """Common wrapper utility returning a test server.
 
     This method is a common wrapper returning a test server that can be
@@ -69,7 +99,9 @@
         server. Include a keypair, a security group and an IP.
     :param tenant_network: Tenant network to be used for creating a server.
     :param wait_until: Server status to wait for the server to reach after
-        its creation.
+        its creation. Additionally, PINGABLE and SSHABLE states are accepted
+        when the server is validatable and the required validation_resources
+        are provided.
     :param volume_backed: Whether the server is volume backed or not.
         If this is true, a volume will be created and create server will be
         requested with 'block_device_mapping_v2' populated with below values:
@@ -93,13 +125,9 @@
         CONF.compute.flavor_ref will be used instead.
     :param image_id: ID of the image to be used to provision the server. If not
         defined, CONF.compute.image_ref will be used instead.
-    :param wait_for_sshable: Check server's console log and wait until it will
-        be ready to login.
     :returns: a tuple
     """
 
-    # TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE
-
     if name is None:
         name = data_utils.rand_name(__name__ + "-instance")
     if flavor is None:
@@ -197,6 +225,7 @@
     body = clients.servers_client.create_server(name=name, imageRef=image_id,
                                                 flavorRef=flavor,
                                                 **kwargs)
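+    # Keep the request id of the boot request so that wait_for_server_status
+    # can include it in its error messages if the server fails to build.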
+    request_id = body.response['x-openstack-request-id']
 
     # handle the case of multiple servers
     if multiple_create_request:
@@ -231,17 +260,50 @@
                 server_id=servers[0]['id'])
 
     if wait_until:
+
+        # NOTE(lyarwood): PINGABLE and SSHABLE both require the instance to
+        # go ACTIVE initially before we can set up the fip(s) etc., so stash
+        # this additional wait state for later use.
+        wait_until_extra = None
+        if wait_until in ['PINGABLE', 'SSHABLE']:
+            wait_until_extra = wait_until
+            wait_until = 'ACTIVE'
+
         for server in servers:
             try:
                 waiters.wait_for_server_status(
-                    clients.servers_client, server['id'], wait_until)
+                    clients.servers_client, server['id'], wait_until,
+                    request_id=request_id)
 
-                # Multiple validatable servers are not supported for now. Their
-                # creation will fail with the condition above.
                 if CONF.validation.run_validation and validatable:
+
                     if CONF.validation.connect_method == 'floating':
                         _setup_validation_fip()
 
+                    server_ip = get_server_ip(
+                        server, validation_resources=validation_resources)
+
+                    if wait_until_extra == 'PINGABLE':
+                        waiters.wait_for_ping(
+                            server_ip,
+                            clients.servers_client.build_timeout,
+                            clients.servers_client.build_interval
+                        )
+
+                    if wait_until_extra == 'SSHABLE':
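+                        # Connect using the private key provisioned in the
+                        # validation resources and wait until SSH
+                        # authentication succeeds.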
+                        pkey = validation_resources['keypair']['private_key']
+                        ssh_client = remote_client.RemoteClient(
+                            server_ip,
+                            CONF.validation.image_ssh_user,
+                            pkey=pkey,
+                            server=server,
+                            servers_client=clients.servers_client
+                        )
+                        waiters.wait_for_ssh(
+                            ssh_client,
+                            clients.servers_client.build_timeout
+                        )
+
             except Exception:
                 with excutils.save_and_reraise_exception():
                     for server in servers:
@@ -265,10 +327,6 @@
                             LOG.exception('Server %s failed to delete in time',
                                           server['id'])
 
-    if (validatable and CONF.compute_feature_enabled.console_output and
-            wait_for_sshable):
-        waiters.wait_for_guest_os_boot(clients.servers_client, server['id'])
-
     return body, servers
 
 
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 5d6e129..9d9fab7 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -48,7 +48,8 @@
             console_output_enabled=CONF.compute_feature_enabled.console_output,
             ssh_shell_prologue=CONF.validation.ssh_shell_prologue,
             ping_count=CONF.validation.ping_count,
-            ping_size=CONF.validation.ping_size)
+            ping_size=CONF.validation.ping_size,
+            ssh_key_type=CONF.validation.ssh_key_type)
 
     # Note that this method will not work on SLES11 guests, as they do
     # not support the TYPE column on lsblk
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index f6a4555..ab401fb 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -10,6 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import os
 import re
 import time
 
@@ -32,7 +33,8 @@
 
 # NOTE(afazekas): This function needs to know a token and a subject.
 def wait_for_server_status(client, server_id, status, ready_wait=True,
-                           extra_timeout=0, raise_on_error=True):
+                           extra_timeout=0, raise_on_error=True,
+                           request_id=None):
     """Waits for a server to reach a given status."""
 
     # NOTE(afazekas): UNKNOWN status possible on ERROR
@@ -71,11 +73,12 @@
                      '/'.join((server_status, str(task_state))),
                      time.time() - start_time)
         if (server_status == 'ERROR') and raise_on_error:
+            details = ''
             if 'fault' in body:
-                raise exceptions.BuildErrorException(body['fault'],
-                                                     server_id=server_id)
-            else:
-                raise exceptions.BuildErrorException(server_id=server_id)
+                details += 'Fault: %s.' % body['fault']
+            if request_id:
+                details += ' Server boot request ID: %s.' % request_id
+            raise exceptions.BuildErrorException(details, server_id=server_id)
 
         timed_out = int(time.time()) - start_time >= timeout
 
@@ -88,6 +91,8 @@
                         'status': status,
                         'expected_task_state': expected_task_state,
                         'timeout': timeout})
+            if request_id:
+                message += ' Server boot request ID: %s.' % request_id
             message += ' Current status: %s.' % server_status
             message += ' Current task state: %s.' % task_state
             caller = test_utils.find_test_caller()
@@ -489,18 +494,34 @@
     return body
 
 
-def wait_for_interface_detach(client, server_id, port_id):
+def wait_for_interface_detach(client, server_id, port_id, detach_request_id):
     """Waits for an interface to be detached from a server."""
-    body = client.list_interfaces(server_id)['interfaceAttachments']
-    ports = [iface['port_id'] for iface in body]
+    def _get_detach_event_results():
+        # NOTE(gibi): The obvious choice for this waiter would be to wait
+        # until the interface disappears from the client.list_interfaces()
+        # response. However, that response is based on the binding status of
+        # the port in Neutron. Nova deallocates the port resources after the
+        # port was unbound in Neutron. This can cause the naive waiter to
+        # return before the port is fully deallocated. Wait instead for the
+        # os-instance-action to succeed, as that is recorded only after the
+        # port is fully deallocated.
+        events = client.show_instance_action(
+            server_id, detach_request_id)['instanceAction'].get('events', [])
+        return [
+            event['result'] for event in events
+            if event['event'] == 'compute_detach_interface'
+        ]
+
+    detach_event_results = _get_detach_event_results()
+
     start = int(time.time())
 
-    while port_id in ports:
+    while "Success" not in detach_event_results:
         time.sleep(client.build_interval)
-        body = client.list_interfaces(server_id)['interfaceAttachments']
-        ports = [iface['port_id'] for iface in body]
-        if port_id not in ports:
-            return body
+        detach_event_results = _get_detach_event_results()
+        if "Success" in detach_event_results:
+            return client.show_instance_action(
+                server_id, detach_request_id)['instanceAction']
 
         timed_out = int(time.time()) - start >= client.build_timeout
         if timed_out:
@@ -510,18 +531,66 @@
             raise lib_exc.TimeoutException(message)
 
 
-def wait_for_guest_os_boot(client, server_id):
+def wait_for_server_floating_ip(servers_client, server, floating_ip,
+                                wait_for_disassociate=False):
+    """Wait for floating IP association or disassociation.
+
+    :param servers_client: The servers client to use when querying the server's
+    floating IPs.
+    :param server: The server JSON dict on which to wait.
+    :param floating_ip: The floating IP JSON dict on which to wait.
+    :param wait_for_disassociate: Boolean indicating whether to wait for
+    disassociation instead of association.
+    """
+
+    def _get_floating_ip_in_server_addresses(floating_ip, server):
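+        # Search every network the server is attached to for a floating
+        # address that matches the given floating IP.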
+        for addresses in server['addresses'].values():
+            for address in addresses:
+                if (
+                    address['OS-EXT-IPS:type'] == 'floating' and
+                    address['addr'] == floating_ip['floating_ip_address']
+                ):
+                    return address
+        return None
+
     start_time = int(time.time())
     while True:
-        console_output = client.get_console_output(server_id)['output']
-        for line in console_output.split('\n'):
-            if 'login:' in line.lower():
-                return
-        if int(time.time()) - start_time >= client.build_timeout:
-            LOG.info("Guest OS on server %s probably isn't ready or its "
-                     "console log can't be parsed properly. If guest OS "
-                     "isn't ready, that may cause problems with SSH to "
-                     "the server.",
-                     server_id)
+        server = servers_client.show_server(server['id'])['server']
+        address = _get_floating_ip_in_server_addresses(floating_ip, server)
+        if address is None and wait_for_disassociate:
+            return None
+        if not wait_for_disassociate and address:
+            return address
+
+        if int(time.time()) - start_time >= servers_client.build_timeout:
+            if wait_for_disassociate:
+                msg = ('Floating ip %s failed to disassociate from server %s '
+                       'in time.' % (floating_ip, server['id']))
+            else:
+                msg = ('Floating ip %s failed to associate with server %s '
+                       'in time.' % (floating_ip, server['id']))
+            raise lib_exc.TimeoutException(msg)
+        time.sleep(servers_client.build_interval)
+
+
+def wait_for_ping(server_ip, timeout=30, interval=1):
+    """Waits for an address to become pingable"""
+    start_time = int(time.time())
+    while int(time.time()) - start_time < timeout:
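+        # os.system returns the command's exit status; 0 means the single
+        # ping received a reply.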
+        response = os.system("ping -c 1 " + server_ip)
+        if response == 0:
             return
-        time.sleep(client.build_interval)
+        time.sleep(interval)
+    raise lib_exc.TimeoutException()
+
+
+def wait_for_ssh(ssh_client, timeout=30):
+    """Waits for SSH connection to become usable"""
+    start_time = int(time.time())
+    while int(time.time()) - start_time < timeout:
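+        # validate_authentication opens an SSH connection and raises
+        # SSHTimeout while the host is not yet reachable over SSH.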
+        try:
+            ssh_client.validate_authentication()
+            return
+        except lib_exc.SSHTimeout:
+            pass
+    raise lib_exc.TimeoutException()
diff --git a/tempest/config.py b/tempest/config.py
index a354689..0f509fb 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -259,8 +259,11 @@
                 help='Does the environment have the security compliance '
                      'settings enabled?'),
     cfg.BoolOpt('project_tags',
-                default=False,
-                help='Is the project tags identity v3 API available?'),
+                default=True,
+                help='Is the project tags identity v3 API available?',
+                deprecated_for_removal=True,
+                deprecated_reason='Project tags API is a default feature '
+                                  'since Queens'),
     cfg.BoolOpt('application_credentials',
                 default=True,
                 help='Does the environment have application credentials '
@@ -875,7 +878,10 @@
                     'bandwidth allocation.'),
     cfg.StrOpt('provider_net_base_segmentation_id', default=3000,
                help='Base segmentation ID to create provider networks. '
-                    'This value will be increased in case of conflict.')
+                    'This value will be increased in case of conflict.'),
+    cfg.BoolOpt('qos_min_bw_and_pps', default=False,
+                help='Does the test environment have minimum bandwidth and '
+                     'packet rate inventories configured?'),
 ]
 
 dashboard_group = cfg.OptGroup(name="dashboard",
@@ -967,6 +973,10 @@
                default='public',
                help="Network used for SSH connections. Ignored if "
                     "connect_method=floating."),
+    cfg.StrOpt('ssh_key_type',
+               default='rsa',
+               help='Type of key to use for ssh connections. '
+                    'Valid types are rsa, ecdsa'),
 ]
 
 volume_group = cfg.OptGroup(name='volume',
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index c1e6b2d..1c9c55b 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -318,3 +318,16 @@
                        " to all negative API tests"
                 )
             _HAVE_NEGATIVE_DECORATOR = False
+
+
+@core.flake8ext
+def no_log_warn(logical_line):
+    """Disallow 'LOG.warn('
+
+    Use LOG.warning() instead of the deprecated LOG.warn().
+    https://docs.python.org/3/library/logging.html#logging.warning
+    """
+
+    msg = ("T118: LOG.warn is deprecated, please use LOG.warning!")
+    if "LOG.warn(" in logical_line:
+        yield (0, msg)
diff --git a/tempest/lib/api_schema/response/compute/v2_16/servers.py b/tempest/lib/api_schema/response/compute/v2_16/servers.py
index fc81ff7..dcd64cf 100644
--- a/tempest/lib/api_schema/response/compute/v2_16/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_16/servers.py
@@ -171,3 +171,4 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_19/servers.py b/tempest/lib/api_schema/response/compute/v2_19/servers.py
index b6c3c14..0e4bd5c 100644
--- a/tempest/lib/api_schema/response/compute/v2_19/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_19/servers.py
@@ -61,3 +61,4 @@
 attach_volume = copy.deepcopy(serversv216.attach_volume)
 show_volume_attachment = copy.deepcopy(serversv216.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(serversv216.list_volume_attachments)
+show_instance_action = copy.deepcopy(serversv216.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_26/servers.py b/tempest/lib/api_schema/response/compute/v2_26/servers.py
index 5a0f987..74c08f1 100644
--- a/tempest/lib/api_schema/response/compute/v2_26/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_26/servers.py
@@ -104,3 +104,4 @@
 attach_volume = copy.deepcopy(servers219.attach_volume)
 show_volume_attachment = copy.deepcopy(servers219.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers219.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers219.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_3/servers.py b/tempest/lib/api_schema/response/compute/v2_3/servers.py
index 1674c1b..435e3ac 100644
--- a/tempest/lib/api_schema/response/compute/v2_3/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_3/servers.py
@@ -176,3 +176,4 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_33/__init__.py b/tempest/lib/api_schema/response/compute/v2_33/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_33/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_33/hypervisors.py b/tempest/lib/api_schema/response/compute/v2_33/hypervisors.py
new file mode 100644
index 0000000..9773605
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_33/hypervisors.py
@@ -0,0 +1,53 @@
+# Copyright 2018 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_28 \
+    import hypervisors as hypervisorsv228
+
+###########################################################################
+#
+# 2.33:
+#
+# hypervisors_links parameter is added to the response body for the following
+# APIs:
+#
+# - GET /os-hypervisors
+# - GET /os-hypervisors/detail
+###########################################################################
+list_search_hypervisors = copy.deepcopy(
+    hypervisorsv228.list_search_hypervisors)
+list_search_hypervisors['response_body']['properties'].update(
+    {'hypervisors_links': parameter_types.links}
+)
+
+list_hypervisors_detail = copy.deepcopy(
+    hypervisorsv228.list_hypervisors_detail)
+list_hypervisors_detail['response_body']['properties'].update(
+    {'hypervisors_links': parameter_types.links}
+)
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.28 ***
+get_hypervisor = copy.deepcopy(hypervisorsv228.get_hypervisor)
+hypervisor_detail = copy.deepcopy(hypervisorsv228.hypervisor_detail)
+get_hypervisor_statistics = \
+    copy.deepcopy(hypervisorsv228.get_hypervisor_statistics)
+get_hypervisor_uptime = copy.deepcopy(hypervisorsv228.get_hypervisor_uptime)
+get_hypervisors_servers = copy.deepcopy(
+    hypervisorsv228.get_hypervisors_servers)
diff --git a/tempest/lib/api_schema/response/compute/v2_47/servers.py b/tempest/lib/api_schema/response/compute/v2_47/servers.py
index d580f2c..7050602 100644
--- a/tempest/lib/api_schema/response/compute/v2_47/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_47/servers.py
@@ -69,3 +69,4 @@
 attach_volume = copy.deepcopy(servers226.attach_volume)
 show_volume_attachment = copy.deepcopy(servers226.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers226.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers226.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_48/servers.py b/tempest/lib/api_schema/response/compute/v2_48/servers.py
index e2e45bc..af6344b 100644
--- a/tempest/lib/api_schema/response/compute/v2_48/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_48/servers.py
@@ -132,3 +132,4 @@
 attach_volume = copy.deepcopy(servers247.attach_volume)
 show_volume_attachment = copy.deepcopy(servers247.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers247.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers247.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_50/__init__.py b/tempest/lib/api_schema/response/compute/v2_50/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_50/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_50/quota_classes.py b/tempest/lib/api_schema/response/compute/v2_50/quota_classes.py
new file mode 100644
index 0000000..4ee845f
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_50/quota_classes.py
@@ -0,0 +1,48 @@
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+from tempest.lib.api_schema.response.compute.v2_1 import quota_classes \
+    as quota_classesv21
+
+# Compute microversion 2.50:
+# 1. fixed_ips, floating_ips, security_group_rules and security_groups
+#    are removed from:
+#      * GET /os-quota-class-sets/{id}
+#      * PUT /os-quota-class-sets/{id}
+# 2. server_groups and server_group_members are added to:
+#      * GET /os-quota-class-sets/{id}
+#      * PUT /os-quota-class-sets/{id}
+
+get_quota_class_set = copy.deepcopy(quota_classesv21.get_quota_class_set)
+update_quota_class_set = copy.deepcopy(quota_classesv21.update_quota_class_set)
+for field in ['fixed_ips', 'floating_ips', 'security_group_rules',
+              'security_groups']:
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].pop(field, None)
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'required'].remove(field)
+    update_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].pop(field, None)
+    update_quota_class_set['response_body']['properties'][
+        'quota_class_set']['required'].remove(field)
+for field in ['server_groups', 'server_group_members']:
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].update({field: {'type': 'integer'}})
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'required'].append(field)
+    update_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].update({field: {'type': 'integer'}})
+    update_quota_class_set['response_body']['properties']['quota_class_set'][
+        'required'].append(field)
diff --git a/tempest/lib/api_schema/response/compute/v2_51/__init__.py b/tempest/lib/api_schema/response/compute/v2_51/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_51/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_51/servers.py b/tempest/lib/api_schema/response/compute/v2_51/servers.py
new file mode 100644
index 0000000..e603287
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_51/servers.py
@@ -0,0 +1,42 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_48 import servers as servers248
+
+# microversion 2.51 made events a mandatory field in the response
+show_instance_action = copy.deepcopy(servers248.show_instance_action)
+show_instance_action['response_body'][
+    'properties']['instanceAction']['required'].append('events')
+
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers248.list_servers)
+show_server_diagnostics = copy.deepcopy(servers248.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers248.get_remote_consoles)
+list_tags = copy.deepcopy(servers248.list_tags)
+update_all_tags = copy.deepcopy(servers248.update_all_tags)
+delete_all_tags = copy.deepcopy(servers248.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers248.check_tag_existence)
+update_tag = copy.deepcopy(servers248.update_tag)
+delete_tag = copy.deepcopy(servers248.delete_tag)
+get_server = copy.deepcopy(servers248.get_server)
+list_servers_detail = copy.deepcopy(servers248.list_servers_detail)
+update_server = copy.deepcopy(servers248.update_server)
+rebuild_server = copy.deepcopy(servers248.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers248.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers248.attach_volume)
+show_volume_attachment = copy.deepcopy(servers248.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers248.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_53/hypervisors.py b/tempest/lib/api_schema/response/compute/v2_53/hypervisors.py
new file mode 100644
index 0000000..e172f1f
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_53/hypervisors.py
@@ -0,0 +1,68 @@
+# Copyright 2018 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_33 \
+    import hypervisors as hypervisorsv233
+
+###########################################################################
+#
+# 2.53:
+#
+# servers parameter is added to the response body for the following
+# APIs:
+#
+# - GET /os-hypervisors/{hypervisor_id}
+# - GET /os-hypervisors
+# - GET /os-hypervisors/detail
+#
+###########################################################################
+
+servers = {
+    'type': 'array',
+    'items': {
+        'type': 'object',
+        'properties': {
+            'uuid': {'type': 'string', 'format': 'uuid'},
+            'name': {'type': 'string'},
+        },
+        'additionalProperties': False,
+    },
+}
+
+hypervisor_detail = copy.deepcopy(hypervisorsv233.hypervisor_detail)
+hypervisor_detail['properties'].update({'servers': servers})
+get_hypervisor = copy.deepcopy(hypervisorsv233.get_hypervisor)
+get_hypervisor['response_body']['properties'].update(
+    {'hypervisor': hypervisor_detail})
+list_hypervisors_detail = copy.deepcopy(
+    hypervisorsv233.list_hypervisors_detail)
+list_hypervisors_detail['response_body']['properties']['hypervisors'].update(
+    {'items': hypervisor_detail})
+
+list_search_hypervisors = copy.deepcopy(
+    hypervisorsv233.list_search_hypervisors)
+list_search_hypervisors['response_body']['properties']['hypervisors'][
+    'items']['properties'].update({'servers': servers})
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.33 ***
+get_hypervisor_statistics = \
+    copy.deepcopy(hypervisorsv233.get_hypervisor_statistics)
+get_hypervisor_uptime = copy.deepcopy(hypervisorsv233.get_hypervisor_uptime)
+get_hypervisors_servers = copy.deepcopy(
+    hypervisorsv233.get_hypervisors_servers)
diff --git a/tempest/lib/api_schema/response/compute/v2_54/servers.py b/tempest/lib/api_schema/response/compute/v2_54/servers.py
index 2c2bff0..135b381 100644
--- a/tempest/lib/api_schema/response/compute/v2_54/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_54/servers.py
@@ -12,7 +12,7 @@
 
 import copy
 
-from tempest.lib.api_schema.response.compute.v2_48 import servers as servers248
+from tempest.lib.api_schema.response.compute.v2_51 import servers as servers251
 # ****** Schemas changed in microversion 2.54 *****************
 
 # Note(gmann): This is schema for microversion 2.54 which includes the
@@ -26,14 +26,14 @@
     ]
 }
 
-rebuild_server = copy.deepcopy(servers248.rebuild_server)
+rebuild_server = copy.deepcopy(servers251.rebuild_server)
 rebuild_server['response_body']['properties']['server'][
     'properties'].update({'key_name': key_name})
 rebuild_server['response_body']['properties']['server'][
     'required'].append('key_name')
 
 rebuild_server_with_admin_pass = copy.deepcopy(
-    servers248.rebuild_server_with_admin_pass)
+    servers251.rebuild_server_with_admin_pass)
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
     'properties'].update({'key_name': key_name})
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
@@ -43,18 +43,19 @@
 # to keep this schema in this file to have the generic way to select the
 # right schema based on self.schema_versions_info mapping in service client.
 # ****** Schemas unchanged in microversion 2.54 since microversion 2.48 ***
-get_server = copy.deepcopy(servers248.get_server)
-list_servers_detail = copy.deepcopy(servers248.list_servers_detail)
-update_server = copy.deepcopy(servers248.update_server)
-list_servers = copy.deepcopy(servers248.list_servers)
-show_server_diagnostics = copy.deepcopy(servers248.show_server_diagnostics)
-get_remote_consoles = copy.deepcopy(servers248.get_remote_consoles)
-list_tags = copy.deepcopy(servers248.list_tags)
-update_all_tags = copy.deepcopy(servers248.update_all_tags)
-delete_all_tags = copy.deepcopy(servers248.delete_all_tags)
-check_tag_existence = copy.deepcopy(servers248.check_tag_existence)
-update_tag = copy.deepcopy(servers248.update_tag)
-delete_tag = copy.deepcopy(servers248.delete_tag)
-attach_volume = copy.deepcopy(servers248.attach_volume)
-show_volume_attachment = copy.deepcopy(servers248.show_volume_attachment)
-list_volume_attachments = copy.deepcopy(servers248.list_volume_attachments)
+get_server = copy.deepcopy(servers251.get_server)
+list_servers_detail = copy.deepcopy(servers251.list_servers_detail)
+update_server = copy.deepcopy(servers251.update_server)
+list_servers = copy.deepcopy(servers251.list_servers)
+show_server_diagnostics = copy.deepcopy(servers251.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers251.get_remote_consoles)
+list_tags = copy.deepcopy(servers251.list_tags)
+update_all_tags = copy.deepcopy(servers251.update_all_tags)
+delete_all_tags = copy.deepcopy(servers251.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers251.check_tag_existence)
+update_tag = copy.deepcopy(servers251.update_tag)
+delete_tag = copy.deepcopy(servers251.delete_tag)
+attach_volume = copy.deepcopy(servers251.attach_volume)
+show_volume_attachment = copy.deepcopy(servers251.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers251.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers251.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_57/quota_classes.py b/tempest/lib/api_schema/response/compute/v2_57/quota_classes.py
new file mode 100644
index 0000000..396ed66
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_57/quota_classes.py
@@ -0,0 +1,37 @@
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+from tempest.lib.api_schema.response.compute.v2_50 import quota_classes \
+    as quota_classesv250
+
+# Compute microversion 2.57:
+# 1. injected_file_content_bytes, injected_file_path_bytes, injected_files
+#    are removed from:
+#      * GET /os-quota-class-sets/{id}
+#      * PUT /os-quota-class-sets/{id}
+
+get_quota_class_set = copy.deepcopy(quota_classesv250.get_quota_class_set)
+update_quota_class_set = copy.deepcopy(
+    quota_classesv250.update_quota_class_set)
+for field in ['injected_file_content_bytes', 'injected_file_path_bytes',
+              'injected_files']:
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].pop(field, None)
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'required'].remove(field)
+    update_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].pop(field, None)
+    update_quota_class_set['response_body']['properties'][
+        'quota_class_set']['required'].remove(field)
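The loop above has to touch two places in each copied schema. A minimal, self-contained illustration of the same pattern on a toy schema (field names abridged, not the real quota class schema) would be:

    import copy

    base = {'response_body': {'properties': {'quota_class_set': {
        'properties': {'cores': {'type': 'integer'},
                       'injected_files': {'type': 'integer'}},
        'required': ['cores', 'injected_files']}}}}

    # copy the previous microversion's schema, then drop the retired field
    # from both 'properties' and 'required'
    derived = copy.deepcopy(base)
    qcs = derived['response_body']['properties']['quota_class_set']
    for field in ['injected_files']:
        qcs['properties'].pop(field, None)
        qcs['required'].remove(field)

    assert 'injected_files' not in qcs['properties']
    assert qcs['required'] == ['cores']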
diff --git a/tempest/lib/api_schema/response/compute/v2_57/servers.py b/tempest/lib/api_schema/response/compute/v2_57/servers.py
index aa57d25..bdff74b 100644
--- a/tempest/lib/api_schema/response/compute/v2_57/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_57/servers.py
@@ -62,3 +62,4 @@
 attach_volume = copy.deepcopy(servers254.attach_volume)
 show_volume_attachment = copy.deepcopy(servers254.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers254.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers254.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_58/__init__.py b/tempest/lib/api_schema/response/compute/v2_58/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_58/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_58/servers.py b/tempest/lib/api_schema/response/compute/v2_58/servers.py
new file mode 100644
index 0000000..62239cf
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_58/servers.py
@@ -0,0 +1,44 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+
+# microversion 2.58 added updated_at to the response
+show_instance_action = copy.deepcopy(servers257.show_instance_action)
+show_instance_action['response_body']['properties']['instanceAction'][
+    'properties']['updated_at'] = parameter_types.date_time
+show_instance_action['response_body']['properties']['instanceAction'][
+    'required'].append('updated_at')
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers257.list_servers)
+show_server_diagnostics = copy.deepcopy(servers257.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers257.get_remote_consoles)
+list_tags = copy.deepcopy(servers257.list_tags)
+update_all_tags = copy.deepcopy(servers257.update_all_tags)
+delete_all_tags = copy.deepcopy(servers257.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers257.check_tag_existence)
+update_tag = copy.deepcopy(servers257.update_tag)
+delete_tag = copy.deepcopy(servers257.delete_tag)
+get_server = copy.deepcopy(servers257.get_server)
+list_servers_detail = copy.deepcopy(servers257.list_servers_detail)
+update_server = copy.deepcopy(servers257.update_server)
+rebuild_server = copy.deepcopy(servers257.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers257.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers257.attach_volume)
+show_volume_attachment = copy.deepcopy(servers257.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers257.list_volume_attachments)
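A hedged, abridged illustration of the shape this schema now enforces (values made up, other required fields omitted): under 2.58 a show-instance-action body must carry an ISO 8601 'updated_at' alongside the fields already required by the 2.57 schema.

    instance_action_fragment = {
        'instanceAction': {
            'action': 'stop',
            'start_time': '2023-01-01T00:00:00.000000',
            'updated_at': '2023-01-01T00:00:07.000000',
            # ... remaining fields unchanged from the 2.57 schema
        }
    }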
diff --git a/tempest/lib/api_schema/response/compute/v2_6/servers.py b/tempest/lib/api_schema/response/compute/v2_6/servers.py
index 922bf79..6103b7c 100644
--- a/tempest/lib/api_schema/response/compute/v2_6/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_6/servers.py
@@ -31,6 +31,7 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
 
 # NOTE: The consolidated remote console API got introduced with v2.6
 # with bp/consolidate-console-api. See Nova commit 578bafeda
diff --git a/tempest/lib/api_schema/response/compute/v2_62/__init__.py b/tempest/lib/api_schema/response/compute/v2_62/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_62/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_62/servers.py b/tempest/lib/api_schema/response/compute/v2_62/servers.py
new file mode 100644
index 0000000..23eebbb
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_62/servers.py
@@ -0,0 +1,47 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_58 import servers as servers258
+
+# microversion 2.62 added hostId and host to the event, but only hostId is
+# mandatory
+show_instance_action = copy.deepcopy(servers258.show_instance_action)
+show_instance_action['response_body']['properties']['instanceAction'][
+    'properties']['events']['items'][
+    'properties']['hostId'] = {'type': 'string'}
+show_instance_action['response_body']['properties']['instanceAction'][
+    'properties']['events']['items']['properties']['host'] = {'type': 'string'}
+show_instance_action['response_body']['properties']['instanceAction'][
+    'properties']['events']['items']['required'].append('hostId')
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers258.list_servers)
+show_server_diagnostics = copy.deepcopy(servers258.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers258.get_remote_consoles)
+list_tags = copy.deepcopy(servers258.list_tags)
+update_all_tags = copy.deepcopy(servers258.update_all_tags)
+delete_all_tags = copy.deepcopy(servers258.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers258.check_tag_existence)
+update_tag = copy.deepcopy(servers258.update_tag)
+delete_tag = copy.deepcopy(servers258.delete_tag)
+get_server = copy.deepcopy(servers258.get_server)
+list_servers_detail = copy.deepcopy(servers258.list_servers_detail)
+update_server = copy.deepcopy(servers258.update_server)
+rebuild_server = copy.deepcopy(servers258.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers258.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers258.attach_volume)
+show_volume_attachment = copy.deepcopy(servers258.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers258.list_volume_attachments)
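Correspondingly, a hedged, abridged sketch of an instance action event under 2.62 (identifiers made up): 'hostId' must always be present, while 'host' is policy-dependent, which is why the schema above adds it as a property but not to 'required'.

    event_fragment = {
        'events': [{
            'event': 'compute_stop_instance',
            'hostId': '2091634baaccdc4c5a1d57069c833e402921df696b7f9707',
            # 'host': 'compute-0',  # optional, only shown to admins
            # ... remaining event fields unchanged from the 2.58 schema
        }]
    }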
diff --git a/tempest/lib/api_schema/response/compute/v2_63/servers.py b/tempest/lib/api_schema/response/compute/v2_63/servers.py
index 01910aa..db713b1 100644
--- a/tempest/lib/api_schema/response/compute/v2_63/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_63/servers.py
@@ -12,7 +12,7 @@
 
 import copy
 
-from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+from tempest.lib.api_schema.response.compute.v2_62 import servers as servers262
 
 # Nova microversion 2.63 adds 'trusted_image_certificates' (a list of
 # certificate IDs) to the server rebuild and servers details responses.
@@ -29,32 +29,32 @@
     }
 }
 
-list_servers_detail = copy.deepcopy(servers257.list_servers_detail)
+list_servers_detail = copy.deepcopy(servers262.list_servers_detail)
 list_servers_detail['response_body']['properties']['servers']['items'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 list_servers_detail['response_body']['properties']['servers']['items'][
     'required'].append('trusted_image_certificates')
 
-rebuild_server = copy.deepcopy(servers257.rebuild_server)
+rebuild_server = copy.deepcopy(servers262.rebuild_server)
 rebuild_server['response_body']['properties']['server'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 rebuild_server['response_body']['properties']['server'][
     'required'].append('trusted_image_certificates')
 
 rebuild_server_with_admin_pass = copy.deepcopy(
-    servers257.rebuild_server_with_admin_pass)
+    servers262.rebuild_server_with_admin_pass)
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
     'required'].append('trusted_image_certificates')
 
-update_server = copy.deepcopy(servers257.update_server)
+update_server = copy.deepcopy(servers262.update_server)
 update_server['response_body']['properties']['server'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 update_server['response_body']['properties']['server'][
     'required'].append('trusted_image_certificates')
 
-get_server = copy.deepcopy(servers257.get_server)
+get_server = copy.deepcopy(servers262.get_server)
 get_server['response_body']['properties']['server'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 get_server['response_body']['properties']['server'][
@@ -64,15 +64,16 @@
 # to keep this schema in this file to have the generic way to select the
 # right schema based on self.schema_versions_info mapping in service client.
-# ****** Schemas unchanged since microversion 2.57 ***
+# ****** Schemas unchanged since microversion 2.62 ***
-list_servers = copy.deepcopy(servers257.list_servers)
-show_server_diagnostics = copy.deepcopy(servers257.show_server_diagnostics)
-get_remote_consoles = copy.deepcopy(servers257.get_remote_consoles)
-list_tags = copy.deepcopy(servers257.list_tags)
-update_all_tags = copy.deepcopy(servers257.update_all_tags)
-delete_all_tags = copy.deepcopy(servers257.delete_all_tags)
-check_tag_existence = copy.deepcopy(servers257.check_tag_existence)
-update_tag = copy.deepcopy(servers257.update_tag)
-delete_tag = copy.deepcopy(servers257.delete_tag)
-attach_volume = copy.deepcopy(servers257.attach_volume)
-show_volume_attachment = copy.deepcopy(servers257.show_volume_attachment)
-list_volume_attachments = copy.deepcopy(servers257.list_volume_attachments)
+list_servers = copy.deepcopy(servers262.list_servers)
+show_server_diagnostics = copy.deepcopy(servers262.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers262.get_remote_consoles)
+list_tags = copy.deepcopy(servers262.list_tags)
+update_all_tags = copy.deepcopy(servers262.update_all_tags)
+delete_all_tags = copy.deepcopy(servers262.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers262.check_tag_existence)
+update_tag = copy.deepcopy(servers262.update_tag)
+delete_tag = copy.deepcopy(servers262.delete_tag)
+attach_volume = copy.deepcopy(servers262.attach_volume)
+show_volume_attachment = copy.deepcopy(servers262.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers262.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers262.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_64/__init__.py b/tempest/lib/api_schema/response/compute/v2_64/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_64/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_64/server_groups.py b/tempest/lib/api_schema/response/compute/v2_64/server_groups.py
new file mode 100644
index 0000000..1402de5
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_64/server_groups.py
@@ -0,0 +1,56 @@
+# Copyright 2020 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_13 import server_groups as \
+    server_groupsv213
+
+# Compute microversion 2.64:
+# 1. change policies to policy in:
+#   * GET /os-server-groups
+#   * POST /os-server-groups
+#   * GET /os-server-groups/{server_group_id}
+# 2. add rules in:
+#   * GET /os-server-groups
+#   * POST /os-server-groups
+#   * GET /os-server-groups/{server_group_id}
+# 3. remove metadata from:
+#   * GET /os-server-groups
+#   * POST /os-server-groups
+#   * GET /os-server-groups/{server_group_id}
+
+common_server_group = copy.deepcopy(server_groupsv213.common_server_group)
+common_server_group['properties']['policy'] = {'type': 'string'}
+common_server_group['properties']['rules'] = {'type': 'object'}
+common_server_group['properties'].pop('policies')
+common_server_group['properties'].pop('metadata')
+common_server_group['required'].append('policy')
+common_server_group['required'].append('rules')
+common_server_group['required'].remove('policies')
+common_server_group['required'].remove('metadata')
+
+create_show_server_group = copy.deepcopy(
+    server_groupsv213.create_show_server_group)
+create_show_server_group['response_body']['properties'][
+    'server_group'] = common_server_group
+
+list_server_groups = copy.deepcopy(server_groupsv213.list_server_groups)
+list_server_groups['response_body']['properties']['server_groups'][
+    'items'] = common_server_group
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+delete_server_group = copy.deepcopy(server_groupsv213.delete_server_group)
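An abridged, made-up pair of bodies illustrating the change this schema encodes: 2.64 replaces the 'policies' list and the 'metadata' dict with a single 'policy' string plus a 'rules' object.

    server_group_pre_264 = {
        'id': 'a1b2c3', 'name': 'sg-1', 'members': [],
        'policies': ['anti-affinity'], 'metadata': {},
    }
    server_group_264 = {
        'id': 'a1b2c3', 'name': 'sg-1', 'members': [],
        'policy': 'anti-affinity', 'rules': {'max_server_per_host': 3},
    }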
diff --git a/tempest/lib/api_schema/response/compute/v2_70/servers.py b/tempest/lib/api_schema/response/compute/v2_70/servers.py
index 5ca4cc8..6103923 100644
--- a/tempest/lib/api_schema/response/compute/v2_70/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_70/servers.py
@@ -78,3 +78,4 @@
 check_tag_existence = copy.deepcopy(servers263.check_tag_existence)
 update_tag = copy.deepcopy(servers263.update_tag)
 delete_tag = copy.deepcopy(servers263.delete_tag)
+show_instance_action = copy.deepcopy(servers263.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
index f4c01ee..3e55c1c 100644
--- a/tempest/lib/api_schema/response/compute/v2_71/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -82,3 +82,4 @@
 attach_volume = copy.deepcopy(servers270.attach_volume)
 show_volume_attachment = copy.deepcopy(servers270.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers270.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers270.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
index ae7ebc4..e7a1d87 100644
--- a/tempest/lib/api_schema/response/compute/v2_73/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -79,3 +79,4 @@
 attach_volume = copy.deepcopy(servers271.attach_volume)
 show_volume_attachment = copy.deepcopy(servers271.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers271.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers271.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_79/servers.py b/tempest/lib/api_schema/response/compute/v2_79/servers.py
index 58dcba8..b5507f9 100644
--- a/tempest/lib/api_schema/response/compute/v2_79/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_79/servers.py
@@ -65,3 +65,4 @@
 check_tag_existence = copy.deepcopy(servers273.check_tag_existence)
 update_tag = copy.deepcopy(servers273.update_tag)
 delete_tag = copy.deepcopy(servers273.delete_tag)
+show_instance_action = copy.deepcopy(servers273.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_8/servers.py b/tempest/lib/api_schema/response/compute/v2_8/servers.py
index 3dbab3f..119d8e2 100644
--- a/tempest/lib/api_schema/response/compute/v2_8/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_8/servers.py
@@ -38,3 +38,4 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_9/servers.py b/tempest/lib/api_schema/response/compute/v2_9/servers.py
index ee0313d..9258eec 100644
--- a/tempest/lib/api_schema/response/compute/v2_9/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_9/servers.py
@@ -57,3 +57,4 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/volume/volumes.py b/tempest/lib/api_schema/response/volume/volumes.py
index ffcf488..4f44526 100644
--- a/tempest/lib/api_schema/response/volume/volumes.py
+++ b/tempest/lib/api_schema/response/volume/volumes.py
@@ -21,7 +21,7 @@
     'items': {
         'type': 'object',
         'properties': {
-            'server_id': {'type': 'string', 'format': 'uuid'},
+            'server_id': {'type': ['string', 'null'], 'format': 'uuid'},
             'attachment_id': {'type': 'string', 'format': 'uuid'},
             'attached_at': parameter_types.date_time_or_null,
             'host_name': {'type': ['string', 'null']},
diff --git a/tempest/lib/base.py b/tempest/lib/base.py
index 74ae77c..3be55c0 100644
--- a/tempest/lib/base.py
+++ b/tempest/lib/base.py
@@ -14,29 +14,11 @@
 #    under the License.
 
 import os
-import sys
 
 import fixtures
-import pkg_resources
 import testtools
 
 
-def _handle_skip_exception():
-    try:
-        stestr_version = pkg_resources.parse_version(
-            pkg_resources.get_distribution("stestr").version)
-        stestr_min = pkg_resources.parse_version('2.5.0')
-        new_stestr = (stestr_version >= stestr_min)
-        import unittest
-        import unittest2
-        if sys.version_info >= (3, 5) and new_stestr:
-            testtools.TestCase.skipException = unittest.case.SkipTest
-        else:
-            testtools.TestCase.skipException = unittest2.case.SkipTest
-    except Exception:
-        pass
-
-
 class BaseTestCase(testtools.testcase.WithAttributes, testtools.TestCase):
     setUpClassCalled = False
 
@@ -51,18 +33,6 @@
         if hasattr(super(BaseTestCase, cls), 'setUpClass'):
             super(BaseTestCase, cls).setUpClass()
         cls.setUpClassCalled = True
-        # TODO(gmann): cls.handle_skip_exception is really workaround for
-        # testtools bug- https://github.com/testing-cabal/testtools/issues/272
-        # stestr which is used by Tempest internally to run the test switch
-        # the customize test runner(which use stdlib unittest) for >=py3.5
-        # else testtools.run.- https://github.com/mtreinish/stestr/pull/265
-        # These two test runner are not compatible due to skip exception
-        # handling(due to unittest2). testtools.run treat unittestt.SkipTest
-        # as error and stdlib unittest treat unittest2.case.SkipTest raised
-        # by testtools.TestCase.skipException.
-        # The below workaround can be removed once testtools fix issue# 272.
-        cls.orig_skip_exception = testtools.TestCase.skipException
-        _handle_skip_exception()
 
     @classmethod
     def tearDownClass(cls):
@@ -70,7 +40,6 @@
             super(BaseTestCase, cls).tearDownClass()
 
     def setUp(self):
-        testtools.TestCase.skipException = self.orig_skip_exception
         super(BaseTestCase, self).setUp()
         if not self.setUpClassCalled:
             raise RuntimeError("setUpClass does not calls the super's "
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 0ae11ca..466222d 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -38,7 +38,7 @@
 
 class SourcePatcher(object):
 
-    """"Lazy patcher for python source files"""
+    """Lazy patcher for python source files"""
 
     def __init__(self):
         self.source_files = None
@@ -431,14 +431,21 @@
                         help='Package with tests')
     parser.add_argument('--fix', action='store_true', dest='fix_tests',
                         help='Attempt to fix tests without UUIDs')
+    parser.add_argument('--libpath', action='store', dest='libpath',
+                        default=".", type=str,
+                        help='Path to package')
+
     args = parser.parse_args()
-    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+    sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+    sys.path.insert(0, args.libpath)
     pkg = importlib.import_module(args.package)
+
     checker = TestChecker(pkg)
     errors = False
     tests = checker.get_tests()
     untagged = checker.find_untagged(tests)
     errors = checker.report_collisions(tests) or errors
+
     if args.fix_tests and untagged:
         checker.fix_tests(untagged)
     else:
diff --git a/tempest/lib/common/cred_provider.py b/tempest/lib/common/cred_provider.py
index 069172a..2da206f 100644
--- a/tempest/lib/common/cred_provider.py
+++ b/tempest/lib/common/cred_provider.py
@@ -13,11 +13,13 @@
 #    limitations under the License.
 
 import abc
-
+from oslo_log import log as logging
 
 from tempest.lib import auth
 from tempest.lib import exceptions
 
+LOG = logging.getLogger(__name__)
+
 
 class CredentialProvider(object, metaclass=abc.ABCMeta):
     def __init__(self, identity_version, name=None,
@@ -125,6 +127,18 @@
     def is_role_available(self, role):
         return
 
+    def cleanup_default_secgroup(self, security_group_client, tenant):
+        resp_body = security_group_client.list_security_groups(
+            tenant_id=tenant,
+            name="default")
+        secgroups_to_delete = resp_body['security_groups']
+        for secgroup in secgroups_to_delete:
+            try:
+                security_group_client.delete_security_group(secgroup['id'])
+            except exceptions.NotFound:
+                LOG.warning('Security group %s, id %s not found for clean-up',
+                            secgroup['name'], secgroup['id'])
+
 
 class TestResources(object):
     """Readonly Credentials, with network resources added."""
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 2e93fd5..be8c0e8 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -518,18 +518,6 @@
             LOG.warning('network with name: %s not found for delete',
                         network_name)
 
-    def cleanup_default_secgroup(self, tenant):
-        nsg_client = self.security_groups_admin_client
-        resp_body = nsg_client.list_security_groups(tenant_id=tenant,
-                                                    name="default")
-        secgroups_to_delete = resp_body['security_groups']
-        for secgroup in secgroups_to_delete:
-            try:
-                nsg_client.delete_security_group(secgroup['id'])
-            except lib_exc.NotFound:
-                LOG.warning('Security group %s, id %s not found for clean-up',
-                            secgroup['name'], secgroup['id'])
-
     def _clear_isolated_net_resources(self):
         client = self.routers_admin_client
         for cred in self._creds:
@@ -578,7 +566,8 @@
             # ensure tenant deletion without big changes.
             try:
                 if self.neutron_available:
-                    self.cleanup_default_secgroup(creds.tenant_id)
+                    self.cleanup_default_secgroup(
+                        self.security_groups_admin_client, creds.tenant_id)
             except lib_exc.NotFound:
                 LOG.warning("failed to cleanup tenant %s's secgroup",
                             creds.tenant_name)
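With the helper moved to the CredentialProvider base class, the security groups client is now passed in explicitly, so other providers can reuse the clean-up as well. A hedged sketch, assuming a provider instance, a security group client and a project id are at hand:

    # hypothetical usage; attribute names mirror the dynamic provider above
    provider.cleanup_default_secgroup(
        provider.security_groups_admin_client, creds.tenant_id)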
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index 0ac757d..1618175 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -31,7 +31,7 @@
 @FORMAT_CHECKER.checks('iso8601-date-time')
 def _validate_datetime_format(instance):
     try:
-        if isinstance(instance, jsonschema.compat.str_types):
+        if instance is not None:
             timeutils.parse_isotime(instance)
     except ValueError:
         return False
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 3f735f5..ef14dfc 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -907,8 +907,8 @@
             if int(time.time()) - start_time >= self.build_timeout:
                 message = ('Failed to delete %(resource_type)s %(id)s within '
                            'the required time (%(timeout)s s). Timer started '
-                           'at %(start_time)s. Timer ended at %(end_time)s'
-                           'waited for %(wait_time)s' %
+                           'at %(start_time)s. Timer ended at %(end_time)s. '
+                           'Waited for %(wait_time)s s.' %
                            {'resource_type': self.resource_type, 'id': id,
                             'timeout': self.build_timeout,
                             'start_time': start_time,
diff --git a/tempest/lib/common/ssh.py b/tempest/lib/common/ssh.py
index ee15375..cb59a82 100644
--- a/tempest/lib/common/ssh.py
+++ b/tempest/lib/common/ssh.py
@@ -21,6 +21,7 @@
 import warnings
 
 from oslo_log import log as logging
+from oslo_utils.secretutils import md5
 
 from tempest.lib import exceptions
 
@@ -33,11 +34,26 @@
 LOG = logging.getLogger(__name__)
 
 
+def get_fingerprint(self):
+    """Patch paramiko
+
+    This method needs to be patched to allow paramiko to work under FIPS.
+    Until the patch to do this merges, patch paramiko here.
+
+    TODO(alee) Remove this when paramiko is patched.
+    See https://github.com/paramiko/paramiko/pull/1928
+    """
+    return md5(self.asbytes(), usedforsecurity=False).digest()
+
+
+paramiko.pkey.PKey.get_fingerprint = get_fingerprint
+
+
 class Client(object):
 
     def __init__(self, host, username, password=None, timeout=300, pkey=None,
                  channel_timeout=10, look_for_keys=False, key_filename=None,
-                 port=22, proxy_client=None):
+                 port=22, proxy_client=None, ssh_key_type='rsa'):
         """SSH client.
 
         Many of parameters are just passed to the underlying implementation
@@ -59,6 +75,7 @@
         :param proxy_client: Another SSH client to provide a transport
             for ssh-over-ssh.  The default is None, which means
             not to use ssh-over-ssh.
+        :param ssh_key_type: ssh key type (rsa, ecdsa)
         :type proxy_client: ``tempest.lib.common.ssh.Client`` object
         """
         self.host = host
@@ -66,8 +83,15 @@
         self.port = port
         self.password = password
         if isinstance(pkey, str):
-            pkey = paramiko.RSAKey.from_private_key(
-                io.StringIO(str(pkey)))
+            if ssh_key_type == 'rsa':
+                pkey = paramiko.RSAKey.from_private_key(
+                    io.StringIO(str(pkey)))
+            elif ssh_key_type == 'ecdsa':
+                pkey = paramiko.ECDSAKey.from_private_key(
+                    io.StringIO(str(pkey)))
+            else:
+                raise exceptions.SSHClientUnsupportedKeyType(
+                    key_type=ssh_key_type)
         self.pkey = pkey
         self.look_for_keys = look_for_keys
         self.key_filename = key_filename
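A hedged usage sketch (host, user and key path are placeholders): passing ssh_key_type='ecdsa' makes the client load the PEM blob with paramiko.ECDSAKey instead of the default paramiko.RSAKey.

    from tempest.lib.common import ssh

    with open('/path/to/ecdsa_key.pem') as f:  # placeholder path
        ecdsa_pem = f.read()
    client = ssh.Client('198.51.100.7', 'cirros', pkey=ecdsa_pem,
                        ssh_key_type='ecdsa')
    output = client.exec_command('uname -a')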
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index d84dd28..224f3bf 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -69,7 +69,7 @@
                  server=None, servers_client=None, ssh_timeout=300,
                  connect_timeout=60, console_output_enabled=True,
                  ssh_shell_prologue="set -eu -o pipefail; PATH=$PATH:/sbin;",
-                 ping_count=1, ping_size=56):
+                 ping_count=1, ping_size=56, ssh_key_type='rsa'):
         """Executes commands in a VM over ssh
 
         :param ip_address: IP address to ssh to
@@ -84,6 +84,7 @@
         :param ssh_shell_prologue: Shell fragments to use before command
         :param ping_count: Number of ping packets
         :param ping_size: Packet size for ping packets
+        :param ssh_key_type: ssh key type (rsa, ecdsa)
         """
         self.server = server
         self.servers_client = servers_client
@@ -92,10 +93,12 @@
         self.ssh_shell_prologue = ssh_shell_prologue
         self.ping_count = ping_count
         self.ping_size = ping_size
+        self.ssh_key_type = ssh_key_type
 
         self.ssh_client = ssh.Client(ip_address, username, password,
                                      ssh_timeout, pkey=pkey,
-                                     channel_timeout=connect_timeout)
+                                     channel_timeout=connect_timeout,
+                                     ssh_key_type=ssh_key_type)
 
     @debug_ssh
     def exec_command(self, cmd):
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index abe68d2..dd7885e 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -256,6 +256,10 @@
                "%(port)s and username: %(username)s as parent")
 
 
+class SSHClientUnsupportedKeyType(TempestException):
+    message = ("SSH client: unsupported key type %(key_type)s")
+
+
 class UnknownServiceClient(TempestException):
     message = "Service clients named %(services)s are not known"
 
diff --git a/tempest/lib/services/compute/hypervisor_client.py b/tempest/lib/services/compute/hypervisor_client.py
index 1cbfcc3..e237845 100644
--- a/tempest/lib/services/compute/hypervisor_client.py
+++ b/tempest/lib/services/compute/hypervisor_client.py
@@ -13,12 +13,18 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.compute.v2_1 \
     import hypervisors as schemav21
 from tempest.lib.api_schema.response.compute.v2_28 \
     import hypervisors as schemav228
+from tempest.lib.api_schema.response.compute.v2_33 \
+    import hypervisors as schemav233
+from tempest.lib.api_schema.response.compute.v2_53 \
+    import hypervisors as schemav253
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
@@ -27,9 +33,11 @@
 
     schema_versions_info = [
         {'min': None, 'max': '2.27', 'schema': schemav21},
-        {'min': '2.28', 'max': None, 'schema': schemav228}]
+        {'min': '2.28', 'max': '2.32', 'schema': schemav228},
+        {'min': '2.33', 'max': '2.52', 'schema': schemav233},
+        {'min': '2.53', 'max': None, 'schema': schemav253}]
 
-    def list_hypervisors(self, detail=False):
+    def list_hypervisors(self, detail=False, **kwargs):
         """List hypervisors information."""
         url = 'os-hypervisors'
         schema = self.get_schema(self.schema_versions_info)
@@ -37,14 +45,19 @@
         if detail:
             url += '/detail'
             _schema = schema.list_hypervisors_detail
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
 
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(_schema, resp, body)
         return rest_client.ResponseBody(resp, body)
 
-    def show_hypervisor(self, hypervisor_id):
+    def show_hypervisor(self, hypervisor_id, **kwargs):
         """Display the details of the specified hypervisor."""
+        url = 'os-hypervisors/%s' % hypervisor_id
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
-        resp, body = self.get('os-hypervisors/%s' % hypervisor_id)
+        resp, body = self.get(url)
         body = json.loads(body)
         schema = self.get_schema(self.schema_versions_info)
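A hedged usage sketch (client set-up omitted): extra keyword arguments are URL-encoded as query parameters, for example the 'with_servers' filter available from microversion 2.53.

    hypervisors = hypervisor_client.list_hypervisors(
        detail=True, with_servers=True)['hypervisors']
    details = hypervisor_client.show_hypervisor(
        hypervisors[0]['id'], with_servers=True)['hypervisor']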
diff --git a/tempest/lib/services/compute/keypairs_client.py b/tempest/lib/services/compute/keypairs_client.py
index 9d7b7fc..51a4583 100644
--- a/tempest/lib/services/compute/keypairs_client.py
+++ b/tempest/lib/services/compute/keypairs_client.py
@@ -15,6 +15,10 @@
 
 from urllib import parse as urllib
 
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import ec
+from cryptography.hazmat.primitives import serialization
+
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.compute.v2_1 import keypairs as schemav21
@@ -28,6 +32,12 @@
     schema_versions_info = [{'min': None, 'max': '2.1', 'schema': schemav21},
                             {'min': '2.2', 'max': None, 'schema': schemav22}]
 
+    def __init__(self, auth_provider, service, region,
+                 ssh_key_type='rsa', **kwargs):
+        super(KeyPairsClient, self).__init__(
+            auth_provider, service, region, **kwargs)
+        self.ssh_key_type = ssh_key_type
+
     def list_keypairs(self, **params):
         """Lists keypairs that are associated with the account.
 
@@ -67,12 +77,30 @@
         API reference:
         https://docs.openstack.org/api-ref/compute/#create-or-import-keypair
         """
+        pkey = None
+        if (self.ssh_key_type == 'ecdsa' and 'public_key' not in kwargs and
+            ('type' not in kwargs or kwargs['type'] == 'ssh')):
+            # create a ecdsa key and pass the public key into the request
+            pkey = ec.generate_private_key(ec.SECP384R1(), default_backend())
+            pubkey = pkey.public_key().public_bytes(
+                encoding=serialization.Encoding.OpenSSH,
+                format=serialization.PublicFormat.OpenSSH)
+            kwargs['public_key'] = pubkey
+
         post_body = json.dumps({'keypair': kwargs})
         resp, body = self.post("os-keypairs", body=post_body)
         body = json.loads(body)
         schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.create_keypair, resp, body)
-        return rest_client.ResponseBody(resp, body)
+        resp_body = rest_client.ResponseBody(resp, body)
+        if pkey:
+            # add the privkey to the response as it was generated here
+            privkey = pkey.private_bytes(
+                encoding=serialization.Encoding.PEM,
+                format=serialization.PrivateFormat.TraditionalOpenSSL,
+                encryption_algorithm=serialization.NoEncryption())
+            resp_body['keypair']['private_key'] = privkey.decode('utf-8')
+        return resp_body
 
     def delete_keypair(self, keypair_name, **params):
         """Deletes a keypair.
diff --git a/tempest/lib/services/compute/quota_classes_client.py b/tempest/lib/services/compute/quota_classes_client.py
index 9b64099..5f220a7 100644
--- a/tempest/lib/services/compute/quota_classes_client.py
+++ b/tempest/lib/services/compute/quota_classes_client.py
@@ -16,20 +16,30 @@
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.compute.v2_1\
-    import quota_classes as classes_schema
+    import quota_classes as schema
+from tempest.lib.api_schema.response.compute.v2_50 import quota_classes \
+    as schemav250
+from tempest.lib.api_schema.response.compute.v2_57 import quota_classes \
+    as schemav257
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
 
 class QuotaClassesClient(base_compute_client.BaseComputeClient):
 
+    schema_versions_info = [
+        {'min': None, 'max': '2.49', 'schema': schema},
+        {'min': '2.50', 'max': '2.56', 'schema': schemav250},
+        {'min': '2.57', 'max': None, 'schema': schemav257}]
+
     def show_quota_class_set(self, quota_class_id):
         """List the quota class set for a quota class."""
 
         url = 'os-quota-class-sets/%s' % quota_class_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.validate_response(classes_schema.get_quota_class_set, resp, body)
+        _schema = self.get_schema(self.schema_versions_info)
+        self.validate_response(_schema.get_quota_class_set, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_quota_class_set(self, quota_class_id, **kwargs):
@@ -45,6 +55,7 @@
                               post_body)
 
         body = json.loads(body)
-        self.validate_response(classes_schema.update_quota_class_set,
+        _schema = self.get_schema(self.schema_versions_info)
+        self.validate_response(_schema.update_quota_class_set,
                                resp, body)
         return rest_client.ResponseBody(resp, body)
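A conceptual sketch only (not tempest's implementation): get_schema() effectively walks schema_versions_info and returns the module whose [min, max] range contains the microversion negotiated for the request.

    def _ver(v):
        # '2.57' -> (2, 57) for tuple comparison
        return tuple(int(part) for part in v.split('.'))

    def pick_schema(versions_info, requested):
        req = _ver(requested)
        for entry in versions_info:
            low = _ver(entry['min']) if entry['min'] else (0, 0)
            high = _ver(entry['max']) if entry['max'] else (9999, 9999)
            if low <= req <= high:
                return entry['schema']
        raise ValueError('no schema registered for microversion %s' % requested)

    # e.g. pick_schema(QuotaClassesClient.schema_versions_info, '2.57')
    # returns the module imported above as schemav257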
diff --git a/tempest/lib/services/compute/server_groups_client.py b/tempest/lib/services/compute/server_groups_client.py
index 89ad2d9..9895653 100644
--- a/tempest/lib/services/compute/server_groups_client.py
+++ b/tempest/lib/services/compute/server_groups_client.py
@@ -20,6 +20,8 @@
     as schema
 from tempest.lib.api_schema.response.compute.v2_13 import server_groups \
     as schemav213
+from tempest.lib.api_schema.response.compute.v2_64 import server_groups \
+    as schemav264
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
@@ -28,7 +30,8 @@
 
     schema_versions_info = [
         {'min': None, 'max': '2.12', 'schema': schema},
-        {'min': '2.13', 'max': None, 'schema': schemav213}]
+        {'min': '2.13', 'max': '2.63', 'schema': schemav213},
+        {'min': '2.64', 'max': None, 'schema': schemav264}]
 
     def create_server_group(self, **kwargs):
         """Create the server group.
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index e58890c..ed3d4c0 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -29,9 +29,12 @@
 from tempest.lib.api_schema.response.compute.v2_3 import servers as schemav23
 from tempest.lib.api_schema.response.compute.v2_47 import servers as schemav247
 from tempest.lib.api_schema.response.compute.v2_48 import servers as schemav248
+from tempest.lib.api_schema.response.compute.v2_51 import servers as schemav251
 from tempest.lib.api_schema.response.compute.v2_54 import servers as schemav254
 from tempest.lib.api_schema.response.compute.v2_57 import servers as schemav257
+from tempest.lib.api_schema.response.compute.v2_58 import servers as schemav258
 from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
+from tempest.lib.api_schema.response.compute.v2_62 import servers as schemav262
 from tempest.lib.api_schema.response.compute.v2_63 import servers as schemav263
 from tempest.lib.api_schema.response.compute.v2_70 import servers as schemav270
 from tempest.lib.api_schema.response.compute.v2_71 import servers as schemav271
@@ -56,9 +59,12 @@
         {'min': '2.19', 'max': '2.25', 'schema': schemav219},
         {'min': '2.26', 'max': '2.46', 'schema': schemav226},
         {'min': '2.47', 'max': '2.47', 'schema': schemav247},
-        {'min': '2.48', 'max': '2.53', 'schema': schemav248},
+        {'min': '2.48', 'max': '2.50', 'schema': schemav248},
+        {'min': '2.51', 'max': '2.53', 'schema': schemav251},
         {'min': '2.54', 'max': '2.56', 'schema': schemav254},
-        {'min': '2.57', 'max': '2.62', 'schema': schemav257},
+        {'min': '2.57', 'max': '2.57', 'schema': schemav257},
+        {'min': '2.58', 'max': '2.61', 'schema': schemav258},
+        {'min': '2.62', 'max': '2.62', 'schema': schemav262},
         {'min': '2.63', 'max': '2.69', 'schema': schemav263},
         {'min': '2.70', 'max': '2.70', 'schema': schemav270},
         {'min': '2.71', 'max': '2.72', 'schema': schemav271},
@@ -715,6 +721,7 @@
         resp, body = self.get("servers/%s/os-instance-actions/%s" %
                               (server_id, request_id))
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.show_instance_action, resp, body)
         return rest_client.ResponseBody(resp, body)
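A hedged usage sketch (server_id and request_id are placeholders): with the schema now selected per request microversion, a 2.62 response is checked for the mandatory per-event 'hostId' introduced by that microversion.

    body = servers_client.show_instance_action(server_id, request_id)
    first_event_host_id = body['instanceAction']['events'][0]['hostId']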
 
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index fc85140..faf35d1 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -15,7 +15,11 @@
 from tempest.lib.services.network.agents_client import AgentsClient
 from tempest.lib.services.network.extensions_client import ExtensionsClient
 from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
+from tempest.lib.services.network.floating_ips_port_forwarding_client import \
+    FloatingIpsPortForwardingClient
 from tempest.lib.services.network.log_resource_client import LogResourceClient
+from tempest.lib.services.network.loggable_resource_client import \
+    LoggableResourceClient
 from tempest.lib.services.network.metering_label_rules_client import \
     MeteringLabelRulesClient
 from tempest.lib.services.network.metering_labels_client import \
@@ -23,8 +27,12 @@
 from tempest.lib.services.network.networks_client import NetworksClient
 from tempest.lib.services.network.ports_client import PortsClient
 from tempest.lib.services.network.qos_client import QosClient
+from tempest.lib.services.network.qos_limit_bandwidth_rules_client import \
+    QosLimitBandwidthRulesClient
 from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
     QosMinimumBandwidthRulesClient
+from tempest.lib.services.network.qos_minimum_packet_rate_rules_client import \
+    QosMinimumPacketRateRulesClient
 from tempest.lib.services.network.quotas_client import QuotasClient
 from tempest.lib.services.network.routers_client import RoutersClient
 from tempest.lib.services.network.security_group_rules_client import \
@@ -41,9 +49,11 @@
 from tempest.lib.services.network.versions_client import NetworkVersionsClient
 
 __all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
-           'MeteringLabelRulesClient', 'MeteringLabelsClient',
-           'NetworksClient', 'NetworkVersionsClient', 'PortsClient',
-           'QosClient', 'QosMinimumBandwidthRulesClient', 'QuotasClient',
-           'RoutersClient', 'SecurityGroupRulesClient', 'SecurityGroupsClient',
+           'FloatingIpsPortForwardingClient', 'MeteringLabelRulesClient',
+           'MeteringLabelsClient', 'NetworksClient', 'NetworkVersionsClient',
+           'PortsClient', 'QosClient', 'QosMinimumBandwidthRulesClient',
+           'QosLimitBandwidthRulesClient', 'QuotasClient', 'RoutersClient',
+           'SecurityGroupRulesClient', 'SecurityGroupsClient',
            'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
-           'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient']
+           'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
+           'LoggableResourceClient', 'QosMinimumPacketRateRulesClient']
diff --git a/tempest/lib/services/network/floating_ips_port_forwarding_client.py b/tempest/lib/services/network/floating_ips_port_forwarding_client.py
new file mode 100644
index 0000000..43e24ea
--- /dev/null
+++ b/tempest/lib/services/network/floating_ips_port_forwarding_client.py
@@ -0,0 +1,78 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class FloatingIpsPortForwardingClient(base.BaseNetworkClient):
+
+    def create_port_forwarding(self, floatingip_id, **kwargs):
+        """Creates a floating IP port forwarding.
+
+        Creates port forwarding by using the configuration that you define in
+        the request object.
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-port-forwarding
+        """
+        uri = '/floatingips/%s/port_forwardings' % floatingip_id
+        post_data = {'port_forwarding': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_port_forwarding(
+            self, floatingip_id, port_forwarding_id, **kwargs):
+        """Updates a floating IP port_forwarding resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-a-port-forwarding
+        """
+        uri = '/floatingips/%s/port_forwardings/%s' % (
+            floatingip_id, port_forwarding_id)
+        post_data = {'port_forwarding': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_port_forwarding(
+            self, floatingip_id, port_forwarding_id, **fields):
+        """Shows details for a floating IP port forwarding id.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-port-forwarding
+        """
+        uri = '/floatingips/%s/port_forwardings/%s' % (
+            floatingip_id, port_forwarding_id)
+        return self.show_resource(uri, **fields)
+
+    def delete_port_forwarding(self, floatingip_id, port_forwarding_id):
+        """Deletes a floating IP port_forwarding resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-a-floating-ip-port-forwarding
+        """
+        uri = '/floatingips/%s/port_forwardings/%s' % (
+            floatingip_id, port_forwarding_id)
+        return self.delete_resource(uri)
+
+    def list_port_forwardings(self, floatingip_id, **filters):
+        """Lists floating Ip port forwardings.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ip-port-forwardings-detail
+        """
+        uri = '/floatingips/%s/port_forwardings' % floatingip_id
+        return self.list_resources(uri, **filters)
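A hedged usage sketch (IDs are placeholders): forward TCP port 2222 of the floating IP to port 22 of a fixed IP behind an internal Neutron port, then remove the forwarding again.

    pf = client.create_port_forwarding(
        floatingip_id,
        internal_port_id=internal_port_id,
        internal_ip_address='10.0.0.10',
        internal_port=22,
        external_port=2222,
        protocol='tcp')['port_forwarding']
    client.delete_port_forwarding(floatingip_id, pf['id'])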
diff --git a/tempest/lib/services/network/loggable_resource_client.py b/tempest/lib/services/network/loggable_resource_client.py
new file mode 100644
index 0000000..774046f
--- /dev/null
+++ b/tempest/lib/services/network/loggable_resource_client.py
@@ -0,0 +1,29 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class LoggableResourceClient(base.BaseNetworkClient):
+
+    def list_loggable_resources(self, **filters):
+        """List Loggable resources.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-loggable-resources
+        """
+        uri = '/log/loggable-resources'
+        return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py b/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py
new file mode 100644
index 0000000..8fd87fe
--- /dev/null
+++ b/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py
@@ -0,0 +1,74 @@
+# Copyright 2021 Red Hat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosLimitBandwidthRulesClient(base.BaseNetworkClient):
+
+    def create_limit_bandwidth_rule(self, qos_policy_id, **kwargs):
+        """Creates a limit bandwidth rule for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules'.format(
+            qos_policy_id)
+        post_data = {'bandwidth_limit_rule': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_limit_bandwidth_rule(self, qos_policy_id, rule_id, **kwargs):
+        """Updates a limit bandwidth rule.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        post_data = {'bandwidth_limit_rule': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_limit_bandwidth_rule(self, qos_policy_id, rule_id, **fields):
+        """Show details of a limit bandwidth rule.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-bandwidth-limit-rule-details
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        return self.show_resource(uri, **fields)
+
+    def delete_limit_bandwidth_rule(self, qos_policy_id, rule_id):
+        """Deletes a limit bandwidth rule for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        return self.delete_resource(uri)
+
+    def list_limit_bandwidth_rules(self, qos_policy_id, **filters):
+        """Lists all limit bandwidth rules for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-bandwidth-limit-rules-for-qos-policy
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules'.format(qos_policy_id)
+        return self.list_resources(uri, **filters)
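A hedged usage sketch (qos_policy_id is a placeholder): cap egress traffic of ports bound to the policy at 10 Mbit/s with a 1 Mbit burst allowance, then raise the cap.

    rule = client.create_limit_bandwidth_rule(
        qos_policy_id,
        max_kbps=10000,
        max_burst_kbps=1000,
        direction='egress')['bandwidth_limit_rule']
    client.update_limit_bandwidth_rule(qos_policy_id, rule['id'],
                                       max_kbps=20000)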
diff --git a/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py b/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py
new file mode 100644
index 0000000..98bcafe
--- /dev/null
+++ b/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py
@@ -0,0 +1,73 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosMinimumPacketRateRulesClient(base.BaseNetworkClient):
+
+    def create_minimum_packet_rate_rule(self, qos_policy_id, **kwargs):
+        """Creates a minimum packet rate rule for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules' % qos_policy_id
+        post_data = {'minimum_packet_rate_rule': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_minimum_packet_rate_rule(
+        self, qos_policy_id, rule_id, **kwargs
+    ):
+        """Updates a minimum packet rate rule.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        post_data = {'minimum_packet_rate_rule': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_minimum_packet_rate_rule(self, qos_policy_id, rule_id, **fields):
+        """Show details of a minimum packet rate rule.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-minimum-packet-rate-rule-details
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        return self.show_resource(uri, **fields)
+
+    def delete_minimum_packet_rate_rule(self, qos_policy_id, rule_id):
+        """Deletes a minimum packet rate rule for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        return self.delete_resource(uri)
+
+    def list_minimum_packet_rate_rules(self, qos_policy_id, **filters):
+        """Lists all minimum packet rate rules for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-minimum-packet-rate-rules-for-qos-policy
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules' % qos_policy_id
+        return self.list_resources(uri, **filters)
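+
+
+# Illustrative usage sketch (not part of the client API): assuming the client
+# is exposed on a service clients manager as ``qos_min_pps_client`` and
+# ``policy_id`` is an existing QoS policy, minimum packet rate rules could be
+# managed roughly like this:
+#
+#     rule = qos_min_pps_client.create_minimum_packet_rate_rule(
+#         policy_id, min_kpps=1000,
+#         direction='any')['minimum_packet_rate_rule']
+#     qos_min_pps_client.update_minimum_packet_rate_rule(
+#         policy_id, rule['id'], min_kpps=2000)
+#     qos_min_pps_client.delete_minimum_packet_rate_rule(
+#         policy_id, rule['id'])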
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 39021d5..7aa96b2 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -30,7 +30,6 @@
 from tempest.common import waiters
 from tempest import config
 from tempest import exceptions
-from tempest.lib.common import api_microversion_fixture
 from tempest.lib.common import api_version_utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
@@ -85,12 +84,10 @@
                 cls.placement_min_microversion,
                 CONF.placement.min_microversion))
 
-    def setUp(self):
-        super(ScenarioTest, self).setUp()
-        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
-            compute_microversion=self.compute_request_microversion,
-            volume_microversion=self.volume_request_microversion,
-            placement_microversion=self.placement_request_microversion))
+        cls.setup_api_microversion_fixture(
+            compute_microversion=cls.compute_request_microversion,
+            volume_microversion=cls.volume_request_microversion,
+            placement_microversion=cls.placement_request_microversion)
 
     def setup_compute_client(cls):
         """Compute client"""
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
deleted file mode 100644
index 55b8d15..0000000
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ /dev/null
@@ -1,487 +0,0 @@
-# Copyright (c) 2019 Ericsson
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import testtools
-
-from tempest.common import utils
-from tempest.common import waiters
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
-from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
-from tempest.scenario import manager
-
-
-CONF = config.CONF
-
-
-class MinBwAllocationPlacementTest(manager.NetworkScenarioTest):
-    credentials = ['primary', 'admin']
-    required_extensions = ['port-resource-request',
-                           'qos',
-                           'qos-bw-minimum-ingress']
-    # The feature QoS minimum bandwidth allocation in Placement API depends on
-    # Granular resource requests to GET /allocation_candidates and Support
-    # allocation candidates with nested resource providers features in
-    # Placement (see: https://specs.openstack.org/openstack/nova-specs/specs/
-    # stein/approved/bandwidth-resource-provider.html#rest-api-impact) and this
-    # means that the minimum placement microversion is 1.29
-    placement_min_microversion = '1.29'
-    placement_max_microversion = 'latest'
-
-    # Nova rejects to boot VM with port which has resource_request field, below
-    # microversion 2.72
-    compute_min_microversion = '2.72'
-    compute_max_microversion = 'latest'
-
-    INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
-    INGRESS_DIRECTION = 'ingress'
-
-    SMALLEST_POSSIBLE_BW = 1
-    # For any realistic inventory value (that is inventory != MAX_INT) an
-    # allocation candidate request of MAX_INT is expected to be rejected, see:
-    # https://github.com/openstack/placement/blob/master/placement/
-    # db/constants.py#L16
-    PLACEMENT_MAX_INT = 0x7FFFFFFF
-    BANDWIDTH_1 = 1000
-    BANDWIDTH_2 = 2000
-
-    @classmethod
-    def setup_clients(cls):
-        super(MinBwAllocationPlacementTest, cls).setup_clients()
-        cls.placement_client = cls.os_admin.placement_client
-        cls.networks_client = cls.os_admin.networks_client
-        cls.subnets_client = cls.os_admin.subnets_client
-        cls.ports_client = cls.os_primary.ports_client
-        cls.routers_client = cls.os_adm.routers_client
-        cls.qos_client = cls.os_admin.qos_client
-        cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
-        cls.flavors_client = cls.os_adm.flavors_client
-        cls.servers_client = cls.os_adm.servers_client
-
-    @classmethod
-    def skip_checks(cls):
-        super(MinBwAllocationPlacementTest, cls).skip_checks()
-        if not CONF.network_feature_enabled.qos_placement_physnet:
-            msg = "Skipped as no physnet is available in config for " \
-                  "placement based QoS allocation."
-            raise cls.skipException(msg)
-
-    def setUp(self):
-        super(MinBwAllocationPlacementTest, self).setUp()
-        self._check_if_allocation_is_possible()
-
-    def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
-        policy = self.qos_client.create_qos_policy(
-            name=data_utils.rand_name(name_prefix),
-            shared=True)['policy']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.qos_client.delete_qos_policy, policy['id'])
-        rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
-            policy['id'],
-            **{
-                'min_kbps': min_kbps,
-                'direction': self.INGRESS_DIRECTION
-            })['minimum_bandwidth_rule']
-        self.addCleanup(
-            test_utils.call_and_ignore_notfound_exc,
-            self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'],
-            rule['id'])
-
-        return policy
-
-    def _create_qos_basic_policies(self):
-        self.qos_policy_valid = self._create_policy_and_min_bw_rule(
-            name_prefix='test_policy_valid',
-            min_kbps=self.SMALLEST_POSSIBLE_BW)
-        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
-            name_prefix='test_policy_not_valid',
-            min_kbps=self.PLACEMENT_MAX_INT)
-
-    def _create_qos_policies_from_life(self):
-        # For tempest-slow the max bandwidth configured is 1000000,
-        # https://opendev.org/openstack/tempest/src/branch/master/
-        # .zuul.yaml#L416-L420
-        self.qos_policy_1 = self._create_policy_and_min_bw_rule(
-            name_prefix='test_policy_1',
-            min_kbps=self.BANDWIDTH_1
-        )
-        self.qos_policy_2 = self._create_policy_and_min_bw_rule(
-            name_prefix='test_policy_2',
-            min_kbps=self.BANDWIDTH_2
-        )
-
-    def _create_network_and_qos_policies(self, policy_method):
-        physnet_name = CONF.network_feature_enabled.qos_placement_physnet
-        base_segm = \
-            CONF.network_feature_enabled.provider_net_base_segmentation_id
-
-        self.prov_network, _, _ = self.setup_network_subnet_with_router(
-            networks_client=self.networks_client,
-            routers_client=self.routers_client,
-            subnets_client=self.subnets_client,
-            **{
-                'shared': True,
-                'provider:network_type': 'vlan',
-                'provider:physical_network': physnet_name,
-                'provider:segmentation_id': base_segm
-            })
-
-        policy_method()
-
-    def _check_if_allocation_is_possible(self):
-        alloc_candidates = self.placement_client.list_allocation_candidates(
-            resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
-                                  self.SMALLEST_POSSIBLE_BW))
-        if len(alloc_candidates['provider_summaries']) == 0:
-            # Skip if the backend does not support QoS minimum bandwidth
-            # allocation in Placement API
-            raise self.skipException(
-                'No allocation candidates are available for %s:%s' %
-                (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
-
-        # Just to be sure check with impossible high (placement max_int),
-        # allocation
-        alloc_candidates = self.placement_client.list_allocation_candidates(
-            resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
-                                  self.PLACEMENT_MAX_INT))
-        if len(alloc_candidates['provider_summaries']) != 0:
-            self.fail('For %s:%s there should be no available candidate!' %
-                      (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
-
-    def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
-        wait_until = (None if status == 'ERROR' else status)
-        port = self.create_port(
-            self.prov_network['id'], qos_policy_id=qos_policy_id)
-
-        server = self.create_server(networks=[{'port': port['id']}],
-                                    wait_until=wait_until)
-        waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
-            status=status, ready_wait=False, raise_on_error=False)
-        return server, port
-
-    def _assert_allocation_is_as_expected(self, consumer, port_ids,
-                                          min_kbps=SMALLEST_POSSIBLE_BW):
-        allocations = self.placement_client.list_allocations(
-            consumer)['allocations']
-        self.assertGreater(len(allocations), 0)
-        bw_resource_in_alloc = False
-        for rp, resources in allocations.items():
-            if self.INGRESS_RESOURCE_CLASS in resources['resources']:
-                self.assertEqual(
-                    min_kbps,
-                    resources['resources'][self.INGRESS_RESOURCE_CLASS])
-                bw_resource_in_alloc = True
-                allocation_rp = rp
-        if min_kbps:
-            self.assertTrue(bw_resource_in_alloc)
-
-            # Check binding_profile of the port is not empty and equals with
-            # the rp uuid
-            for port_id in port_ids:
-                port = self.os_admin.ports_client.show_port(port_id)
-                self.assertEqual(
-                    allocation_rp,
-                    port['port']['binding:profile']['allocation'])
-
-    @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
-    @utils.services('compute', 'network')
-    def test_qos_min_bw_allocation_basic(self):
-        """"Basic scenario with QoS min bw allocation in placement.
-
-        Steps:
-        * Create prerequisites:
-        ** VLAN type provider network with subnet.
-        ** valid QoS policy with minimum bandwidth rule with min_kbps=1
-        (This is a simplification to skip the checks in placement for
-        detecting the resource provider tree and inventories, as if
-        bandwidth resource is available 1 kbs will be available).
-        ** invalid QoS policy with minimum bandwidth rule with
-        min_kbs=max integer from placement (this is a simplification again
-        to avoid detection of RP tress and inventories, as placement will
-        reject such big allocation).
-        * Create port with valid QoS policy, and boot VM with that, it should
-        pass.
-        * Create port with invalid QoS policy, and try to boot VM with that,
-        it should fail.
-        """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
-        server1, valid_port = self._boot_vm_with_min_bw(
-            qos_policy_id=self.qos_policy_valid['id'])
-        self._assert_allocation_is_as_expected(server1['id'],
-                                               [valid_port['id']])
-
-        server2, not_valid_port = self._boot_vm_with_min_bw(
-            self.qos_policy_not_valid['id'], status='ERROR')
-        allocations = self.placement_client.list_allocations(server2['id'])
-
-        self.assertEqual(0, len(allocations['allocations']))
-        server2 = self.servers_client.show_server(server2['id'])
-        self.assertIn('fault', server2['server'])
-        self.assertIn('No valid host', server2['server']['fault']['message'])
-        # Check that binding_profile of the port is empty
-        port = self.os_admin.ports_client.show_port(not_valid_port['id'])
-        self.assertEqual(0, len(port['port']['binding:profile']))
-
-    @decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada')
-    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
-                          'Cold migration is not available.')
-    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
-                          'Less than 2 compute nodes, skipping multinode '
-                          'tests.')
-    @utils.services('compute', 'network')
-    def test_migrate_with_qos_min_bw_allocation(self):
-        """Scenario to migrate VM with QoS min bw allocation in placement
-
-        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
-        checks, and
-        * migrate the server
-        * confirm the resize, if the VM state is VERIFY_RESIZE
-        * If the VM goes to ACTIVE state check that allocations are as
-        expected.
-        """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
-        server, valid_port = self._boot_vm_with_min_bw(
-            qos_policy_id=self.qos_policy_valid['id'])
-        self._assert_allocation_is_as_expected(server['id'],
-                                               [valid_port['id']])
-
-        self.servers_client.migrate_server(server_id=server['id'])
-        waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
-            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
-
-        # TODO(lajoskatona): Check that the allocations are ok for the
-        #  migration?
-        self._assert_allocation_is_as_expected(server['id'],
-                                               [valid_port['id']])
-
-        self.servers_client.confirm_resize_server(server_id=server['id'])
-        waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
-            status='ACTIVE', ready_wait=False, raise_on_error=True)
-        self._assert_allocation_is_as_expected(server['id'],
-                                               [valid_port['id']])
-
-    @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
-    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
-                          'Resize not available.')
-    @utils.services('compute', 'network')
-    def test_resize_with_qos_min_bw_allocation(self):
-        """Scenario to resize VM with QoS min bw allocation in placement.
-
-        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
-        checks, and
-        * resize the server with new flavor
-        * confirm the resize, if the VM state is VERIFY_RESIZE
-        * If the VM goes to ACTIVE state check that allocations are as
-        expected.
-        """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
-        server, valid_port = self._boot_vm_with_min_bw(
-            qos_policy_id=self.qos_policy_valid['id'])
-        self._assert_allocation_is_as_expected(server['id'],
-                                               [valid_port['id']])
-
-        old_flavor = self.flavors_client.show_flavor(
-            CONF.compute.flavor_ref)['flavor']
-        new_flavor = self.flavors_client.create_flavor(**{
-            'ram': old_flavor['ram'],
-            'vcpus': old_flavor['vcpus'],
-            'name': old_flavor['name'] + 'extra',
-            'disk': old_flavor['disk'] + 1
-        })['flavor']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.flavors_client.delete_flavor, new_flavor['id'])
-
-        self.servers_client.resize_server(
-            server_id=server['id'], flavor_ref=new_flavor['id'])
-        waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
-            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
-
-        # TODO(lajoskatona): Check that the allocations are ok for the
-        #  migration?
-        self._assert_allocation_is_as_expected(server['id'],
-                                               [valid_port['id']])
-
-        self.servers_client.confirm_resize_server(server_id=server['id'])
-        waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server['id'],
-            status='ACTIVE', ready_wait=False, raise_on_error=True)
-        self._assert_allocation_is_as_expected(server['id'],
-                                               [valid_port['id']])
-
-    @decorators.idempotent_id('79fdaa1c-df62-4738-a0f0-1cff9dc415f6')
-    @utils.services('compute', 'network')
-    def test_qos_min_bw_allocation_update_policy(self):
-        """Test the update of QoS policy on bound port
-
-        Related RFE in neutron: #1882804
-        The scenario is the following:
-        * Have a port with QoS policy and minimum bandwidth rule.
-        * Boot a VM with the port.
-        * Update the port with a new policy with different minimum bandwidth
-        values.
-        * The allocation on placement side should be according to the new
-        rules.
-        """
-        if not utils.is_network_feature_enabled('update_port_qos'):
-            raise self.skipException("update_port_qos feature is not enabled")
-
-        self._create_network_and_qos_policies(
-            self._create_qos_policies_from_life)
-
-        port = self.create_port(
-            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
-
-        server1 = self.create_server(
-            networks=[{'port': port['id']}])
-
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
-                                               self.BANDWIDTH_1)
-
-        self.ports_client.update_port(
-            port['id'],
-            **{'qos_policy_id': self.qos_policy_2['id']})
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
-                                               self.BANDWIDTH_2)
-
-        # I changed my mind
-        self.ports_client.update_port(
-            port['id'],
-            **{'qos_policy_id': self.qos_policy_1['id']})
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
-                                               self.BANDWIDTH_1)
-
-        # bad request....
-        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
-            name_prefix='test_policy_not_valid',
-            min_kbps=self.PLACEMENT_MAX_INT)
-        port_orig = self.ports_client.show_port(port['id'])['port']
-        self.assertRaises(
-            lib_exc.Conflict,
-            self.ports_client.update_port,
-            port['id'], **{'qos_policy_id': self.qos_policy_not_valid['id']})
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
-                                               self.BANDWIDTH_1)
-
-        port_upd = self.ports_client.show_port(port['id'])['port']
-        self.assertEqual(port_orig['qos_policy_id'],
-                         port_upd['qos_policy_id'])
-        self.assertEqual(self.qos_policy_1['id'], port_upd['qos_policy_id'])
-
-    @decorators.idempotent_id('9cfc3bb8-f433-4c91-87b6-747cadc8958a')
-    @utils.services('compute', 'network')
-    def test_qos_min_bw_allocation_update_policy_from_zero(self):
-        """Test port without QoS policy to have QoS policy
-
-        This scenario checks if updating a port without QoS policy to
-        have QoS policy with minimum_bandwidth rule succeeds only on
-        controlplane, but placement allocation remains 0.
-        """
-        if not utils.is_network_feature_enabled('update_port_qos'):
-            raise self.skipException("update_port_qos feature is not enabled")
-
-        self._create_network_and_qos_policies(
-            self._create_qos_policies_from_life)
-
-        port = self.create_port(self.prov_network['id'])
-
-        server1 = self.create_server(
-            networks=[{'port': port['id']}])
-
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
-
-        self.ports_client.update_port(
-            port['id'], **{'qos_policy_id': self.qos_policy_2['id']})
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
-
-    @decorators.idempotent_id('a9725a70-1d28-4e3b-ae0e-450abc235962')
-    @utils.services('compute', 'network')
-    def test_qos_min_bw_allocation_update_policy_to_zero(self):
-        """Test port with QoS policy to remove QoS policy
-
-        In this scenario port with QoS minimum_bandwidth rule update to
-        remove QoS policy results in 0 placement allocation.
-        """
-        if not utils.is_network_feature_enabled('update_port_qos'):
-            raise self.skipException("update_port_qos feature is not enabled")
-
-        self._create_network_and_qos_policies(
-            self._create_qos_policies_from_life)
-
-        port = self.create_port(
-            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
-
-        server1 = self.create_server(
-            networks=[{'port': port['id']}])
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
-                                               self.BANDWIDTH_1)
-
-        self.ports_client.update_port(
-            port['id'],
-            **{'qos_policy_id': None})
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
-
-    @decorators.idempotent_id('756ced7f-6f1a-43e7-a851-2fcfc16f3dd7')
-    @utils.services('compute', 'network')
-    def test_qos_min_bw_allocation_update_with_multiple_ports(self):
-        if not utils.is_network_feature_enabled('update_port_qos'):
-            raise self.skipException("update_port_qos feature is not enabled")
-
-        self._create_network_and_qos_policies(
-            self._create_qos_policies_from_life)
-
-        port1 = self.create_port(
-            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
-        port2 = self.create_port(
-            self.prov_network['id'], qos_policy_id=self.qos_policy_2['id'])
-
-        server1 = self.create_server(
-            networks=[{'port': port1['id']}, {'port': port2['id']}])
-        self._assert_allocation_is_as_expected(
-            server1['id'], [port1['id'], port2['id']],
-            self.BANDWIDTH_1 + self.BANDWIDTH_2)
-
-        self.ports_client.update_port(
-            port1['id'],
-            **{'qos_policy_id': self.qos_policy_2['id']})
-        self._assert_allocation_is_as_expected(
-            server1['id'], [port1['id'], port2['id']],
-            2 * self.BANDWIDTH_2)
-
-    @decorators.idempotent_id('0805779e-e03c-44fb-900f-ce97a790653b')
-    @utils.services('compute', 'network')
-    def test_empty_update(self):
-        if not utils.is_network_feature_enabled('update_port_qos'):
-            raise self.skipException("update_port_qos feature is not enabled")
-
-        self._create_network_and_qos_policies(
-            self._create_qos_policies_from_life)
-
-        port = self.create_port(
-            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
-
-        server1 = self.create_server(
-            networks=[{'port': port['id']}])
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
-                                               self.BANDWIDTH_1)
-        self.ports_client.update_port(
-            port['id'],
-            **{'description': 'foo'})
-        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
-                                               self.BANDWIDTH_1)
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2c981c8..5aac19c 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -96,13 +96,6 @@
                    '%s' % (secgroup['id'], server['id']))
             raise exceptions.TimeoutException(msg)
 
-    def _get_floating_ip_in_server_addresses(self, floating_ip, server):
-        for addresses in server['addresses'].values():
-            for address in addresses:
-                if (address['OS-EXT-IPS:type'] == 'floating' and
-                        address['addr'] == floating_ip['floating_ip_address']):
-                    return address
-
     @decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
     @utils.services('compute', 'volume', 'image', 'network')
     def test_minimum_basic_scenario(self):
@@ -132,15 +125,8 @@
             fip = self.create_floating_ip(server)
             floating_ip = self.associate_floating_ip(
                 fip, server)
-            # fetch the server again to make sure the addresses were refreshed
-            # after associating the floating IP
-            server = self.servers_client.show_server(server['id'])['server']
-            address = self._get_floating_ip_in_server_addresses(
-                floating_ip, server)
-            self.assertIsNotNone(
-                address,
-                "Failed to find floating IP '%s' in server addresses: %s" %
-                (floating_ip['floating_ip_address'], server['addresses']))
+            waiters.wait_for_server_floating_ip(self.servers_client,
+                                                server, floating_ip)
             ssh_ip = floating_ip['floating_ip_address']
         else:
             ssh_ip = self.get_server_ip(server)
@@ -165,19 +151,6 @@
         if floating_ip:
             # delete the floating IP, this should refresh the server addresses
             self.disassociate_floating_ip(floating_ip)
-
-            def is_floating_ip_detached_from_server():
-                server_info = self.servers_client.show_server(
-                    server['id'])['server']
-                address = self._get_floating_ip_in_server_addresses(
-                    floating_ip, server_info)
-                return (not address)
-
-            if not test_utils.call_until_true(
-                is_floating_ip_detached_from_server,
-                CONF.compute.build_timeout,
-                CONF.compute.build_interval):
-                msg = ("Floating IP '%s' should not be in server addresses: %s"
-                       % (floating_ip['floating_ip_address'],
-                          server['addresses']))
-                raise exceptions.TimeoutException(msg)
+            waiters.wait_for_server_floating_ip(
+                self.servers_client, server, floating_ip,
+                wait_for_disassociate=True)
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index add5c32..cbe8c20 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -329,13 +329,16 @@
         floating_ip, server = self.floating_ip_tuple
         # get internal ports' ips:
         # get all network and compute ports in the new network
+        # NOTE(ralonsoh): device_owner="network:distributed" ports are OVN
+        # metadata ports and should be filtered out.
         internal_ips = (
             p['fixed_ips'][0]['ip_address'] for p in
             self.os_admin.ports_client.list_ports(
                 project_id=server['tenant_id'],
                 network_id=network['id'])['ports']
-            if p['device_owner'].startswith('network') or
-            p['device_owner'].startswith('compute')
+            if ((p['device_owner'].startswith('network') and
+                 not p['device_owner'] == 'network:distributed') or
+                p['device_owner'].startswith('compute'))
         )
 
         self._check_server_connectivity(floating_ip,
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
new file mode 100644
index 0000000..365eb1b
--- /dev/null
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -0,0 +1,1072 @@
+# Copyright (c) 2019 Ericsson
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest.scenario import manager
+
+
+CONF = config.CONF
+
+
+class NetworkQoSPlacementTestBase(manager.NetworkScenarioTest):
+    """Base class for Network QoS testing
+
+    Base class for testing Network QoS scenarios involving placement
+    resource allocations.
+    """
+    credentials = ['primary', 'admin']
+    # The QoS minimum bandwidth allocation feature in the Placement API
+    # depends on the "Granular resource requests to GET
+    # /allocation_candidates" and "Support allocation candidates with nested
+    # resource providers" features in Placement (see:
+    # https://specs.openstack.org/openstack/nova-specs/specs/
+    # stein/approved/bandwidth-resource-provider.html#rest-api-impact), which
+    # means that the minimum placement microversion is 1.29.
+    placement_min_microversion = '1.29'
+    placement_max_microversion = 'latest'
+
+    # Nova rejects booting a VM with a port that has a resource_request
+    # field below microversion 2.72.
+    compute_min_microversion = '2.72'
+    compute_max_microversion = 'latest'
+
+    INGRESS_DIRECTION = 'ingress'
+    EGRESS_DIRECTION = 'egress'
+    ANY_DIRECTION = 'any'
+    INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
+    EGRESS_RESOURCE_CLASS = "NET_BW_EGR_KILOBIT_PER_SEC"
+
+    # For any realistic inventory value (that is inventory != MAX_INT) an
+    # allocation candidate request of MAX_INT is expected to be rejected, see:
+    # https://github.com/openstack/placement/blob/master/placement/
+    # db/constants.py#L16
+    PLACEMENT_MAX_INT = 0x7FFFFFFF
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.placement_client = cls.os_admin.placement_client
+        cls.networks_client = cls.os_admin.networks_client
+        cls.subnets_client = cls.os_admin.subnets_client
+        cls.ports_client = cls.os_primary.ports_client
+        cls.routers_client = cls.os_adm.routers_client
+        cls.qos_client = cls.os_admin.qos_client
+        cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+        cls.flavors_client = cls.os_adm.flavors_client
+        cls.servers_client = cls.os_primary.servers_client
+
+    def _create_flavor_to_resize_to(self):
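+        # The new flavor differs from the reference flavor only in disk size,
+        # so resizing to it is still a real resize while keeping the extra
+        # resource usage minimal.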
+        old_flavor = self.flavors_client.show_flavor(
+            CONF.compute.flavor_ref)['flavor']
+        new_flavor = self.flavors_client.create_flavor(**{
+            'ram': old_flavor['ram'],
+            'vcpus': old_flavor['vcpus'],
+            'name': old_flavor['name'] + 'extra-%s' % data_utils.rand_int_id(),
+            'disk': old_flavor['disk'] + 1
+        })['flavor']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.flavors_client.delete_flavor, new_flavor['id'])
+        return new_flavor
+
+
+class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase):
+
+    required_extensions = ['port-resource-request',
+                           'qos',
+                           'qos-bw-minimum-ingress']
+
+    SMALLEST_POSSIBLE_BW = 1
+    BANDWIDTH_1 = 1000
+    BANDWIDTH_2 = 2000
+
+    @classmethod
+    def skip_checks(cls):
+        super(MinBwAllocationPlacementTest, cls).skip_checks()
+        if not CONF.network_feature_enabled.qos_placement_physnet:
+            msg = "Skipped as no physnet is available in config for " \
+                  "placement based QoS allocation."
+            raise cls.skipException(msg)
+
+    def setUp(self):
+        super(MinBwAllocationPlacementTest, self).setUp()
+        self._check_if_allocation_is_possible()
+
+    def _create_policy_and_min_bw_rule(
+        self, name_prefix, min_kbps, direction="ingress"
+    ):
+        policy = self.qos_client.create_qos_policy(
+            name=data_utils.rand_name(name_prefix),
+            shared=True)['policy']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.qos_client.delete_qos_policy, policy['id'])
+        rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
+            policy['id'],
+            **{
+                'min_kbps': min_kbps,
+                'direction': direction,
+            })['minimum_bandwidth_rule']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'],
+            rule['id'])
+
+        return policy
+
+    def _create_qos_basic_policies(self):
+        self.qos_policy_valid = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_valid',
+            min_kbps=self.SMALLEST_POSSIBLE_BW)
+        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_not_valid',
+            min_kbps=self.PLACEMENT_MAX_INT)
+
+    def _create_qos_policies_from_life(self):
+        # For tempest-slow the max bandwidth configured is 1000000,
+        # https://opendev.org/openstack/tempest/src/branch/master/
+        # .zuul.yaml#L416-L420
+        self.qos_policy_1 = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_1',
+            min_kbps=self.BANDWIDTH_1
+        )
+        self.qos_policy_2 = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_2',
+            min_kbps=self.BANDWIDTH_2
+        )
+
+    def _create_network_and_qos_policies(self, policy_method):
+        physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+        base_segm = \
+            CONF.network_feature_enabled.provider_net_base_segmentation_id
+
+        self.prov_network, _, _ = self.setup_network_subnet_with_router(
+            networks_client=self.networks_client,
+            routers_client=self.routers_client,
+            subnets_client=self.subnets_client,
+            **{
+                'shared': True,
+                'provider:network_type': 'vlan',
+                'provider:physical_network': physnet_name,
+                'provider:segmentation_id': base_segm
+            })
+
+        policy_method()
+
+    def _check_if_allocation_is_possible(self):
+        alloc_candidates = self.placement_client.list_allocation_candidates(
+            resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+                                  self.SMALLEST_POSSIBLE_BW))
+        if len(alloc_candidates['provider_summaries']) == 0:
+            self.fail('No allocation candidates are available for %s:%s' %
+                      (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
+
+        # Just to be sure, check with an impossibly high allocation
+        # (placement max_int).
+        alloc_candidates = self.placement_client.list_allocation_candidates(
+            resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+                                  self.PLACEMENT_MAX_INT))
+        if len(alloc_candidates['provider_summaries']) != 0:
+            self.fail('For %s:%s there should be no available candidate!' %
+                      (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+
+    def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
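+        # When an ERROR status is expected, do not ask create_server to wait
+        # for it; the status is checked explicitly with the waiter below.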
+        wait_until = (None if status == 'ERROR' else status)
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=qos_policy_id)
+
+        server = self.create_server(networks=[{'port': port['id']}],
+                                    wait_until=wait_until)
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status=status, ready_wait=False, raise_on_error=False)
+        return server, port
+
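+    # NOTE: default argument values are evaluated at method definition
+    # time, so the inherited resource class used as a default below is
+    # referenced through the base class rather than through ``self``.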
+    def _assert_allocation_is_as_expected(
+        self, consumer, port_ids, min_kbps=SMALLEST_POSSIBLE_BW,
+        expected_rc=NetworkQoSPlacementTestBase.INGRESS_RESOURCE_CLASS,
+    ):
+        allocations = self.placement_client.list_allocations(
+            consumer)['allocations']
+        self.assertGreater(len(allocations), 0)
+        bw_resource_in_alloc = False
+        allocation_rp = None
+        for rp, resources in allocations.items():
+            if expected_rc in resources['resources']:
+                self.assertEqual(
+                    min_kbps,
+                    resources['resources'][expected_rc])
+                bw_resource_in_alloc = True
+                allocation_rp = rp
+        if min_kbps:
+            self.assertTrue(
+                bw_resource_in_alloc,
+                f"expected {min_kbps} bandwidth allocation from {expected_rc} "
+                f"but instance has allocation {allocations} instead."
+            )
+
+            # Check that the binding_profile of the port is not empty and
+            # equals the rp uuid.
+            for port_id in port_ids:
+                port = self.os_admin.ports_client.show_port(port_id)
+                port_binding_alloc = port['port']['binding:profile'][
+                    'allocation']
+                # NOTE(gibi): the format of the allocation key depends on the
+                # existence of port-resource-request-groups API extension.
+                # TODO(gibi): drop the else branch once tempest does not need
+                # to support Xena release any more.
+                if utils.is_extension_enabled(
+                        'port-resource-request-groups', 'network'):
+                    self.assertEqual(
+                        {allocation_rp},
+                        set(port_binding_alloc.values()))
+                else:
+                    self.assertEqual(allocation_rp, port_binding_alloc)
+
+    @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_basic(self):
+        """"Basic scenario with QoS min bw allocation in placement.
+
+        Steps:
+        * Create prerequisites:
+        ** VLAN type provider network with subnet.
+        ** valid QoS policy with a minimum bandwidth rule with min_kbps=1
+        (This is a simplification to skip the checks in placement for
+        detecting the resource provider tree and inventories, as if the
+        bandwidth resource is available, 1 kbps will be available).
+        ** invalid QoS policy with a minimum bandwidth rule with
+        min_kbps=max integer from placement (this is again a simplification
+        to avoid detection of RP trees and inventories, as placement will
+        reject such a big allocation).
+        * Create a port with the valid QoS policy and boot a VM with it;
+        this should pass.
+        * Create a port with the invalid QoS policy and try to boot a VM
+        with it; this should fail.
+        """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server1, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server1['id'],
+                                               [valid_port['id']])
+
+        server2, not_valid_port = self._boot_vm_with_min_bw(
+            self.qos_policy_not_valid['id'], status='ERROR')
+        allocations = self.placement_client.list_allocations(server2['id'])
+
+        self.assertEqual(0, len(allocations['allocations']))
+        server2 = self.servers_client.show_server(server2['id'])
+        self.assertIn('fault', server2['server'])
+        self.assertIn('No valid host', server2['server']['fault']['message'])
+        # Check that binding_profile of the port is empty
+        port = self.os_admin.ports_client.show_port(not_valid_port['id'])
+        self.assertEqual(0, len(port['port']['binding:profile']))
+
+    @decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada')
+    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+                          'Cold migration is not available.')
+    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+                          'Less than 2 compute nodes, skipping multinode '
+                          'tests.')
+    @utils.services('compute', 'network')
+    def test_migrate_with_qos_min_bw_allocation(self):
+        """Scenario to migrate VM with QoS min bw allocation in placement
+
+        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+        checks, and
+        * migrate the server
+        * confirm the resize once the VM state is VERIFY_RESIZE
+        * if the VM goes to ACTIVE state, check that the allocations are as
+        expected.
+        """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.os_adm.servers_client.migrate_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        # TODO(lajoskatona): Check that the allocations are ok for the
+        #  migration?
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.os_adm.servers_client.confirm_resize_server(
+            server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+    @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @utils.services('compute', 'network')
+    def test_resize_with_qos_min_bw_allocation(self):
+        """Scenario to resize VM with QoS min bw allocation in placement.
+
+        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+        checks, and
+        * resize the server with new flavor
+        * confirm the resize once the VM state is VERIFY_RESIZE
+        * if the VM goes to ACTIVE state, check that the allocations are as
+        expected.
+        """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        new_flavor = self._create_flavor_to_resize_to()
+
+        self.servers_client.resize_server(
+            server_id=server['id'], flavor_ref=new_flavor['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        # TODO(lajoskatona): Check that the allocations are ok for the
+        #  migration?
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.servers_client.confirm_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+    @decorators.idempotent_id('79fdaa1c-df62-4738-a0f0-1cff9dc415f6')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy(self):
+        """Test the update of QoS policy on bound port
+
+        Related RFE in neutron: #1882804
+        The scenario is the following:
+        * Have a port with QoS policy and minimum bandwidth rule.
+        * Boot a VM with the port.
+        * Update the port with a new policy with different minimum bandwidth
+        values.
+        * The allocation on placement side should be according to the new
+        rules.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_2)
+
+        # Change back to the original policy
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': self.qos_policy_1['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        # Updating to a policy that cannot be allocated should be rejected
+        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_not_valid',
+            min_kbps=self.PLACEMENT_MAX_INT)
+        port_orig = self.ports_client.show_port(port['id'])['port']
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.ports_client.update_port,
+            port['id'], **{'qos_policy_id': self.qos_policy_not_valid['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        port_upd = self.ports_client.show_port(port['id'])['port']
+        self.assertEqual(port_orig['qos_policy_id'],
+                         port_upd['qos_policy_id'])
+        self.assertEqual(self.qos_policy_1['id'], port_upd['qos_policy_id'])
+
+    @decorators.idempotent_id('9cfc3bb8-f433-4c91-87b6-747cadc8958a')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_from_zero(self):
+        """Test port without QoS policy to have QoS policy
+
+        This scenario checks if updating a port without QoS policy to
+        have QoS policy with minimum_bandwidth rule succeeds only on
+        controlplane, but placement allocation remains 0.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(self.prov_network['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+        self.ports_client.update_port(
+            port['id'], **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+    @decorators.idempotent_id('a9725a70-1d28-4e3b-ae0e-450abc235962')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_to_zero(self):
+        """Test port with QoS policy to remove QoS policy
+
+        In this scenario port with QoS minimum_bandwidth rule update to
+        remove QoS policy results in 0 placement allocation.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': None})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+    @decorators.idempotent_id('756ced7f-6f1a-43e7-a851-2fcfc16f3dd7')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_with_multiple_ports(self):
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port1 = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+        port2 = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_2['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port1['id']}, {'port': port2['id']}])
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port1['id'], port2['id']],
+            self.BANDWIDTH_1 + self.BANDWIDTH_2)
+
+        self.ports_client.update_port(
+            port1['id'],
+            **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port1['id'], port2['id']],
+            2 * self.BANDWIDTH_2)
+
+    @decorators.idempotent_id('0805779e-e03c-44fb-900f-ce97a790653b')
+    @utils.services('compute', 'network')
+    def test_empty_update(self):
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+        self.ports_client.update_port(
+            port['id'],
+            **{'description': 'foo'})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+    @decorators.idempotent_id('372b2728-cfed-469a-b5f6-b75779e1ccbe')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_direction_change(self):
+        """Test QoS min bw direction change on a bound port
+
+        Related RFE in neutron: #1882804
+        The scenario is the following:
+        * Have a port with a QoS policy and a minimum bandwidth rule with
+        ingress direction.
+        * Boot a VM with the port.
+        * Update the port with a new policy whose minimum bandwidth rule has
+        egress direction.
+        * The allocation on the placement side should be according to the
+        new rules.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        def create_policies():
+            self.qos_policy_ingress = self._create_policy_and_min_bw_rule(
+                name_prefix='test_policy_ingress',
+                min_kbps=self.BANDWIDTH_1,
+                direction=self.INGRESS_DIRECTION,
+            )
+            self.qos_policy_egress = self._create_policy_and_min_bw_rule(
+                name_prefix='test_policy_egress',
+                min_kbps=self.BANDWIDTH_1,
+                direction=self.EGRESS_DIRECTION,
+            )
+
+        self._create_network_and_qos_policies(create_policies)
+
+        port = self.create_port(
+            self.prov_network['id'],
+            qos_policy_id=self.qos_policy_ingress['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port['id']], self.BANDWIDTH_1,
+            expected_rc=self.INGRESS_RESOURCE_CLASS)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=self.qos_policy_egress['id'])
+
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port['id']], self.BANDWIDTH_1,
+            expected_rc=self.EGRESS_RESOURCE_CLASS)
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port['id']], 0,
+            expected_rc=self.INGRESS_RESOURCE_CLASS)
+
+
+class QoSBandwidthAndPacketRateTests(NetworkQoSPlacementTestBase):
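+    """Scenarios combining QoS minimum bandwidth and packet rate rules
+
+    These tests check placement allocations for ports whose QoS policy
+    carries both a minimum bandwidth and a minimum packet rate rule.
+    """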
+
+    PPS_RESOURCE_CLASS = "NET_PACKET_RATE_KILOPACKET_PER_SEC"
+
+    @classmethod
+    def skip_checks(cls):
+        super().skip_checks()
+        if not CONF.network_feature_enabled.qos_min_bw_and_pps:
+            msg = (
+                "Skipped as no resource inventories are configured for QoS "
+                "minimum bandwidth and packet rate testing.")
+            raise cls.skipException(msg)
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.qos_min_pps_client = cls.os_admin.qos_min_pps_client
+
+    def setUp(self):
+        super().setUp()
+        self.network = self._create_network()
+
+    def _create_qos_policy_with_bw_and_pps_rules(self, min_kbps, min_kpps):
+        policy = self.qos_client.create_qos_policy(
+            name=data_utils.rand_name(),
+            shared=True
+        )['policy']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.qos_client.delete_qos_policy,
+            policy['id']
+        )
+
+        if min_kbps > 0:
+            bw_rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
+                policy['id'],
+                min_kbps=min_kbps,
+                direction=self.INGRESS_DIRECTION
+            )['minimum_bandwidth_rule']
+            self.addCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                self.qos_min_bw_client.delete_minimum_bandwidth_rule,
+                policy['id'],
+                bw_rule['id']
+            )
+
+        if min_kpps > 0:
+            pps_rule = self.qos_min_pps_client.create_minimum_packet_rate_rule(
+                policy['id'],
+                min_kpps=min_kpps,
+                direction=self.ANY_DIRECTION
+            )['minimum_packet_rate_rule']
+            self.addCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                self.qos_min_pps_client.delete_minimum_packet_rate_rule,
+                policy['id'],
+                pps_rule['id']
+            )
+
+        return policy
+
+    def _create_network(self):
+        physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+        base_segm = (
+            CONF.network_feature_enabled.provider_net_base_segmentation_id)
+
+        # setup_network_subnet_with_router will add the necessary cleanup calls
+        network, _, _ = self.setup_network_subnet_with_router(
+            networks_client=self.networks_client,
+            routers_client=self.routers_client,
+            subnets_client=self.subnets_client,
+            shared=True,
+            **{
+                'provider:network_type': 'vlan',
+                'provider:physical_network': physnet_name,
+                # +1 to be different from the segmentation_id used in
+                # MinBwAllocationPlacementTest
+                'provider:segmentation_id': int(base_segm) + 1,
+            }
+        )
+        return network
+
+    def _create_port_with_qos_policy(self, policy):
+        port = self.ports_client.create_port(
+            name=data_utils.rand_name(self.__class__.__name__),
+            network_id=self.network['id'],
+            qos_policy_id=policy['id'] if policy else None,
+        )['port']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.ports_client.delete_port, port['id']
+        )
+        return port
+
+    def assert_allocations(
+            self, server, port, expected_min_kbps, expected_min_kpps
+    ):
+        allocations = self.placement_client.list_allocations(
+            server['id'])['allocations']
+
+        # one allocation for the flavor related resources on the compute RP
+        expected_allocation = 1
+        # one allocation due to bw rule
+        if expected_min_kbps > 0:
+            expected_allocation += 1
+        # one allocation due to pps rule
+        if expected_min_kpps > 0:
+            expected_allocation += 1
+        self.assertEqual(expected_allocation, len(allocations), allocations)
+
+        expected_rp_uuids_in_binding_allocation = set()
+
+        if expected_min_kbps > 0:
+            bw_rp_allocs = {
+                rp: alloc['resources'][self.INGRESS_RESOURCE_CLASS]
+                for rp, alloc in allocations.items()
+                if self.INGRESS_RESOURCE_CLASS in alloc['resources']
+            }
+            self.assertEqual(1, len(bw_rp_allocs))
+            bw_rp, bw_alloc = list(bw_rp_allocs.items())[0]
+            self.assertEqual(expected_min_kbps, bw_alloc)
+            expected_rp_uuids_in_binding_allocation.add(bw_rp)
+
+        if expected_min_kpps > 0:
+            pps_rp_allocs = {
+                rp: alloc['resources'][self.PPS_RESOURCE_CLASS]
+                for rp, alloc in allocations.items()
+                if self.PPS_RESOURCE_CLASS in alloc['resources']
+            }
+            self.assertEqual(1, len(pps_rp_allocs))
+            pps_rp, pps_alloc = list(pps_rp_allocs.items())[0]
+            self.assertEqual(expected_min_kpps, pps_alloc)
+            expected_rp_uuids_in_binding_allocation.add(pps_rp)
+
+        # Check that binding:profile.allocation on the port points to the
+        # resource providers the allocations were made from
+        port = self.os_admin.ports_client.show_port(port['id'])
+        port_binding_alloc = port[
+            'port']['binding:profile'].get('allocation', {})
+        self.assertEqual(
+            expected_rp_uuids_in_binding_allocation,
+            set(port_binding_alloc.values())
+        )
+
+    def assert_no_allocation(self, server, port):
+        # check that there are no allocations
+        allocations = self.placement_client.list_allocations(
+            server['id'])['allocations']
+        self.assertEqual(0, len(allocations))
+
+        # check that binding_profile of the port is empty
+        port = self.os_admin.ports_client.show_port(port['id'])
+        self.assertEqual(0, len(port['port']['binding:profile']))
+
+    @decorators.idempotent_id('93d1a88d-235e-4b7b-b44d-2a17dcf4e213')
+    @utils.services('compute', 'network')
+    def test_server_create_delete(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.servers_client.delete_server(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
+
+        self.assert_no_allocation(server, port)
+
+    def _test_create_server_negative(self, min_kbps=1000, min_kpps=100):
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
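+        # The resource request is expected to be unschedulable, so do not
+        # wait for ACTIVE; instead wait for the server to go to ERROR and
+        # then check the fault message.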
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until=None)
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ERROR', ready_wait=False, raise_on_error=False)
+
+        # check that the creation failed with No valid host
+        server = self.servers_client.show_server(server['id'])['server']
+        self.assertIn('fault', server)
+        self.assertIn('No valid host', server['fault']['message'])
+
+        self.assert_no_allocation(server, port)
+
+    @decorators.idempotent_id('915dd2ce-4890-40c8-9db6-f3e04080c6c1')
+    @utils.services('compute', 'network')
+    def test_server_create_no_valid_host_due_to_bandwidth(self):
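+        # PLACEMENT_MAX_INT is assumed to exceed any configured bandwidth
+        # inventory, so scheduling is expected to fail with No valid host.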
+        self._test_create_server_negative(min_kbps=self.PLACEMENT_MAX_INT)
+
+    @decorators.idempotent_id('2d4a755e-10b9-4ac0-bef2-3f89de1f150b')
+    @utils.services('compute', 'network')
+    def test_server_create_no_valid_host_due_to_packet_rate(self):
+        self._test_create_server_negative(min_kpps=self.PLACEMENT_MAX_INT)
+
+    @decorators.idempotent_id('69d93e4f-0dfc-4d17-8d84-cc5c3c842cd5')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.resize, 'Resize not available.')
+    @utils.services('compute', 'network')
+    def test_server_resize(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        new_flavor = self._create_flavor_to_resize_to()
+
+        self.servers_client.resize_server(
+            server_id=server['id'], flavor_ref=new_flavor['id']
+        )
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.servers_client.confirm_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+    @decorators.idempotent_id('d01d4aee-ca06-4e4e-add7-8a47fe0daf96')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.resize, 'Resize not available.')
+    @utils.services('compute', 'network')
+    def test_server_resize_revert(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        new_flavor = self._create_flavor_to_resize_to()
+
+        self.servers_client.resize_server(
+            server_id=server['id'], flavor_ref=new_flavor['id']
+        )
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.servers_client.revert_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+    @decorators.idempotent_id('bdd0b31c-c8b0-4b7b-b80a-545a46b32abe')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.cold_migration,
+        'Cold migration is not available.')
+    @testtools.skipUnless(
+        CONF.compute.min_compute_nodes > 1,
+        'Less than 2 compute nodes, skipping multinode tests.')
+    @utils.services('compute', 'network')
+    def test_server_migrate(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.os_adm.servers_client.migrate_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.os_adm.servers_client.confirm_resize_server(
+            server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+    @decorators.idempotent_id('fdb260e3-caa5-482d-ac7c-8c22adf3d750')
+    @utils.services('compute', 'network')
+    def test_qos_policy_update_on_bound_port(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        min_kbps2 = 2000
+        min_kpps2 = 50
+        policy2 = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps2, min_kpps2)
+
+        port = self._create_port_with_qos_policy(policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=policy2['id'])
+
+        self.assert_allocations(server, port, min_kbps2, min_kpps2)
+
+    @decorators.idempotent_id('e6a20125-a02e-49f5-bcf6-894305ee3715')
+    @utils.services('compute', 'network')
+    def test_qos_policy_update_on_bound_port_from_null_policy(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        port = self._create_port_with_qos_policy(policy=None)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, 0, 0)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=policy['id'])
+
+        # NOTE(gibi): This is unintuitive but it is the expected behavior.
+        # If there was no policy attached to the port when the server was
+        # created, then neutron still allows adding a policy to the port
+        # later, as this operation was supported before placement enforcement
+        # was added for the qos minimum bandwidth rule. However, neutron
+        # cannot create the placement resource allocation for this port.
+        self.assert_allocations(server, port, 0, 0)
+
+    @decorators.idempotent_id('f5864761-966c-4e49-b430-ac0044b7d658')
+    @utils.services('compute', 'network')
+    def test_qos_policy_update_on_bound_port_additional_rule(self):
+        min_kbps = 1000
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, 0)
+
+        min_kbps2 = 2000
+        min_kpps2 = 50
+        policy2 = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps2, min_kpps2)
+
+        port = self._create_port_with_qos_policy(policy=policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, 0)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=policy2['id'])
+
+        # FIXME(gibi): Agree in the spec: do we ignore the pps request or do
+        # we reject the update? The current implementation seems to ignore
+        # the additional pps rule.
+        self.assert_allocations(server, port, min_kbps2, 0)
+
+    @decorators.idempotent_id('fbbb9c81-ed21-48c3-bdba-ce2361e93aad')
+    @utils.services('compute', 'network')
+    def test_qos_policy_update_on_bound_port_to_null_policy(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        port = self._create_port_with_qos_policy(policy=policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        self.ports_client.update_port(
+            port['id'],
+            qos_policy_id=None)
+
+        self.assert_allocations(server, port, 0, 0)
+
+    @decorators.idempotent_id('0393d038-03ad-4844-a0e4-83010f69dabb')
+    @utils.services('compute', 'network')
+    def test_interface_attach_detach(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        port = self._create_port_with_qos_policy(policy=None)
+
+        port2 = self._create_port_with_qos_policy(policy=policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, 0, 0)
+
+        self.interface_client.create_interface(
+            server_id=server['id'],
+            port_id=port2['id'])
+        waiters.wait_for_interface_status(
+            self.interface_client, server['id'], port2['id'], 'ACTIVE')
+
+        self.assert_allocations(server, port2, min_kbps, min_kpps)
+
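+        # Detach the interface and wait for the corresponding
+        # compute_detach_interface instance action, identified by the
+        # request id of the detach call, to complete.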
+        req_id = self.interface_client.delete_interface(
+            server_id=server['id'],
+            port_id=port2['id']).response['x-openstack-request-id']
+        waiters.wait_for_interface_detach(
+            self.servers_client, server['id'], port2['id'], req_id)
+
+        self.assert_allocations(server, port2, 0, 0)
+
+    @decorators.idempotent_id('36ffdb85-6cc2-4cc9-a426-cad5bac8626b')
+    @testtools.skipUnless(
+        CONF.compute.min_compute_nodes > 1,
+        'Less than 2 compute nodes, skipping multinode tests.')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.live_migration,
+        'Live migration not available')
+    @utils.services('compute', 'network')
+    def test_server_live_migrate(self):
+        min_kbps = 1000
+        min_kpps = 100
+        policy = self._create_qos_policy_with_bw_and_pps_rules(
+            min_kbps, min_kpps)
+
+        port = self._create_port_with_qos_policy(policy=policy)
+
+        server = self.create_server(
+            networks=[{'port': port['id']}],
+            wait_until='ACTIVE'
+        )
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
+
+        server_details = self.os_adm.servers_client.show_server(server['id'])
+        source_host = server_details['server']['OS-EXT-SRV-ATTR:host']
+
+        self.os_adm.servers_client.live_migrate_server(
+            server['id'], block_migration=True, host=None)
+        waiters.wait_for_server_status(
+            self.servers_client, server['id'], 'ACTIVE')
+
+        server_details = self.os_adm.servers_client.show_server(server['id'])
+        new_host = server_details['server']['OS-EXT-SRV-ATTR:host']
+
+        self.assertNotEqual(source_host, new_host, "Live migration failed")
+
+        self.assert_allocations(server, port, min_kbps, min_kpps)
diff --git a/tempest/test.py b/tempest/test.py
index 8ea3b16..dba2695 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,7 +26,7 @@
 from tempest.common import credentials_factory as credentials
 from tempest.common import utils
 from tempest import config
-from tempest.lib import base as lib_base
+from tempest.lib.common import api_microversion_fixture
 from tempest.lib.common import fixed_network
 from tempest.lib.common import profiler
 from tempest.lib.common import validation_resources as vr
@@ -141,19 +141,6 @@
         # It should never be overridden by descendants
         if hasattr(super(BaseTestCase, cls), 'setUpClass'):
             super(BaseTestCase, cls).setUpClass()
-        # All the configuration checks that may generate a skip
-        # TODO(gmann): cls.handle_skip_exception is really workaround for
-        # testtools bug- https://github.com/testing-cabal/testtools/issues/272
-        # stestr which is used by Tempest internally to run the test switch
-        # the customize test runner(which use stdlib unittest) for >=py3.5
-        # else testtools.run.- https://github.com/mtreinish/stestr/pull/265
-        # These two test runner are not compatible due to skip exception
-        # handling(due to unittest2). testtools.run treat unittestt.SkipTest
-        # as error and stdlib unittest treat unittest2.case.SkipTest raised
-        # by testtools.TestCase.skipException.
-        # The below workaround can be removed once testtools fix issue# 272.
-        orig_skip_exception = testtools.TestCase.skipException
-        lib_base._handle_skip_exception()
         try:
             cls.skip_checks()
 
@@ -181,8 +168,6 @@
                 raise value.with_traceback(trace)
             finally:
                 del trace  # to avoid circular refs
-        finally:
-            testtools.TestCase.skipException = orig_skip_exception
 
     @classmethod
     def tearDownClass(cls):
@@ -480,6 +465,34 @@
         pass
 
     @classmethod
+    def setup_api_microversion_fixture(
+            cls, compute_microversion=None, volume_microversion=None,
+            placement_microversion=None):
+        """Set up api microversion fixture on service clients.
+
+        `setup_api_microversion_fixture` is used to set the API microversion
+        on the service clients. It can be invoked from the resource_setup()
+        method.
+
+        Example::
+
+            @classmethod
+            def resource_setup(cls):
+                super(MyTest, cls).resource_setup()
+                cls.setup_api_microversion_fixture(
+                    compute_microversion=cls.compute_request_microversion,
+                    volume_microversion=cls.volume_request_microversion,
+                    placement_microversion=cls.placement_request_microversion)
+
+        """
+
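+        # The fixture is set up directly (not via useFixture) because this
+        # runs at class scope; the reset is registered as a class resource
+        # cleanup so the microversion headers are restored on teardown.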
+        api_fixture = api_microversion_fixture.APIMicroversionFixture(
+            compute_microversion=compute_microversion,
+            volume_microversion=volume_microversion,
+            placement_microversion=placement_microversion)
+        api_fixture.setUp()
+        cls.addClassResourceCleanup(api_fixture._reset_microversion)
+
+    @classmethod
     def resource_setup(cls):
         """Class level resource setup for test cases.
 
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 5816ab1..a19f20b 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -13,15 +13,10 @@
 #    under the License.
 
 import os
-import sys
+import unittest
 
 from tempest.test_discover import plugins
 
-if sys.version_info >= (2, 7):
-    import unittest
-else:
-    import unittest2 as unittest
-
 
 def load_tests(loader, tests, pattern):
     ext_plugins = plugins.TempestTestPluginManager()
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 5cdbfbf..1d0ee77 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -186,69 +186,96 @@
                                          mock.call('server_id', 'port_id')])
         sleep.assert_called_once_with(client.build_interval)
 
-    one_interface = {'interfaceAttachments': [{'port_id': 'port_one'}]}
-    two_interfaces = {'interfaceAttachments': [{'port_id': 'port_one'},
-                                               {'port_id': 'port_two'}]}
-
     def test_wait_for_interface_detach(self):
-        list_interfaces = mock.MagicMock(
-            side_effect=[self.two_interfaces, self.one_interface])
-        client = self.mock_client(list_interfaces=list_interfaces)
+        no_event = {
+            'instanceAction': {
+                'events': []
+            }
+        }
+        one_event_without_result = {
+            'instanceAction': {
+                'events': [
+                    {
+                        'event': 'compute_detach_interface',
+                        'result': None
+                    }
+
+                ]
+            }
+        }
+        one_event_successful = {
+            'instanceAction': {
+                'events': [
+                    {
+                        'event': 'compute_detach_interface',
+                        'result': 'Success'
+                    }
+                ]
+            }
+        }
+
+        show_instance_action = mock.MagicMock(
+            # there is an extra call to return the result from the waiter
+            side_effect=[
+                no_event,
+                one_event_without_result,
+                one_event_successful,
+                one_event_successful,
+            ]
+        )
+        client = self.mock_client(show_instance_action=show_instance_action)
         self.patch('time.time', return_value=0.)
         sleep = self.patch('time.sleep')
 
         result = waiters.wait_for_interface_detach(
-            client, 'server_id', 'port_two')
+            client, mock.sentinel.server_id, mock.sentinel.port_id,
+            mock.sentinel.detach_request_id
+        )
 
-        self.assertIs(self.one_interface['interfaceAttachments'], result)
-        list_interfaces.assert_has_calls([mock.call('server_id'),
-                                          mock.call('server_id')])
-        sleep.assert_called_once_with(client.build_interval)
+        self.assertIs(one_event_successful['instanceAction'], result)
+        show_instance_action.assert_has_calls(
+            # there is an extra call to return the result from the waiter
+            [
+                mock.call(
+                    mock.sentinel.server_id, mock.sentinel.detach_request_id)
+            ] * 4
+        )
+        sleep.assert_has_calls([mock.call(client.build_interval)] * 2)
 
     def test_wait_for_interface_detach_timeout(self):
-        list_interfaces = mock.MagicMock(return_value=self.one_interface)
-        client = self.mock_client(list_interfaces=list_interfaces)
+        one_event_without_result = {
+            'instanceAction': {
+                'events': [
+                    {
+                        'event': 'compute_detach_interface',
+                        'result': None
+                    }
+
+                ]
+            }
+        }
+
+        show_instance_action = mock.MagicMock(
+            return_value=one_event_without_result)
+        client = self.mock_client(show_instance_action=show_instance_action)
         self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
         sleep = self.patch('time.sleep')
 
-        self.assertRaises(lib_exc.TimeoutException,
-                          waiters.wait_for_interface_detach,
-                          client, 'server_id', 'port_one')
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_interface_detach,
+            client, mock.sentinel.server_id, mock.sentinel.port_id,
+            mock.sentinel.detach_request_id
+        )
 
-        list_interfaces.assert_has_calls([mock.call('server_id'),
-                                          mock.call('server_id')])
+        show_instance_action.assert_has_calls(
+            [
+                mock.call(
+                    mock.sentinel.server_id, mock.sentinel.detach_request_id)
+            ] * 2
+        )
         sleep.assert_called_once_with(client.build_interval)
 
-    def test_wait_for_guest_os_boot(self):
-        get_console_output = mock.Mock(
-            side_effect=[
-                {'output': 'os not ready yet\n'},
-                {'output': 'login:\n'}
-            ])
-        client = self.mock_client(get_console_output=get_console_output)
-        self.patch('time.time', return_value=0.)
-        sleep = self.patch('time.sleep')
-
-        with mock.patch.object(waiters.LOG, "info") as log_info:
-            waiters.wait_for_guest_os_boot(client, 'server_id')
-
-        get_console_output.assert_has_calls([
-            mock.call('server_id'), mock.call('server_id')])
-        sleep.assert_called_once_with(client.build_interval)
-        log_info.assert_not_called()
-
-    def test_wait_for_guest_os_boot_timeout(self):
-        get_console_output = mock.Mock(
-            return_value={'output': 'os not ready yet\n'})
-        client = self.mock_client(get_console_output=get_console_output)
-        self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
-        self.patch('time.sleep')
-
-        with mock.patch.object(waiters.LOG, "info") as log_info:
-            waiters.wait_for_guest_os_boot(client, 'server_id')
-
-        log_info.assert_called_once()
-
 
 class TestVolumeWaiters(base.TestCase):
     vol_migrating_src_host = {
@@ -495,3 +522,101 @@
         # Assert that list_volume_attachments was actually called
         mock_list_volume_attachments.assert_called_once_with(
             mock.sentinel.server_id)
+
+    @mock.patch('os.system')
+    def test_wait_for_ping_host_alive(self, mock_ping):
+        mock_ping.return_value = 0
+        # Assert that nothing is raised as the host is alive
+        waiters.wait_for_ping('127.0.0.1', 10, 1)
+
+    @mock.patch('os.system')
+    def test_wait_for_ping_host_eventually_alive(self, mock_ping):
+        mock_ping.side_effect = [1, 1, 0]
+        # Assert that nothing is raised when the host is eventually alive
+        waiters.wait_for_ping('127.0.0.1', 10, 1)
+
+    @mock.patch('os.system')
+    def test_wait_for_ping_timeout(self, mock_ping):
+        mock_ping.return_value = 1
+        # Assert that TimeoutException is raised when the host is dead
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_ping,
+            '127.0.0.1',
+            .1,
+            .1
+        )
+
+    def test_wait_for_ssh(self):
+        mock_ssh_client = mock.Mock()
+        mock_ssh_client.validate_authentication.return_value = True
+        # Assert that nothing is raised when validate_authentication returns
+        waiters.wait_for_ssh(mock_ssh_client, .1)
+        mock_ssh_client.validate_authentication.assert_called_once()
+
+    def test_wait_for_ssh_eventually_up(self):
+        mock_ssh_client = mock.Mock()
+        timeout = lib_exc.SSHTimeout(
+            host='foo',
+            username='bar',
+            password='fizz'
+        )
+        mock_ssh_client.validate_authentication.side_effect = [
+            timeout,
+            timeout,
+            True
+        ]
+        # Assert that nothing is raised if validate_authentication passes
+        # before the timeout
+        waiters.wait_for_ssh(mock_ssh_client, 10)
+
+    def test_wait_for_ssh_timeout(self):
+        mock_ssh_client = mock.Mock()
+        timeout = lib_exc.SSHTimeout(
+            host='foo',
+            username='bar',
+            password='fizz'
+        )
+        mock_ssh_client.validate_authentication.side_effect = timeout
+        # Assert that TimeoutException is raised when validate_authentication
+        # doesn't pass in time.
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_ssh,
+            mock_ssh_client,
+            .1
+        )
+
+
+class TestServerFloatingIPWaiters(base.TestCase):
+
+    def test_wait_for_server_floating_ip_associate_timeout(self):
+        mock_server = {'server': {'id': 'fake_uuid', 'addresses': {}}}
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_timeout=1, build_interval=1,
+            show_server=lambda id: mock_server)
+
+        fake_server = {'id': 'fake-uuid'}
+        fake_fip = {'floating_ip_address': 'fake_address'}
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_server_floating_ip, mock_client, fake_server,
+            fake_fip)
+
+    def test_wait_for_server_floating_ip_disassociate_timeout(self):
+        mock_addresses = {'shared': [{'OS-EXT-IPS:type': 'floating',
+                                      'addr': 'fake_address'}]}
+        mock_server = {'server': {'id': 'fake_uuid',
+                                  'addresses': mock_addresses}}
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_timeout=1, build_interval=1,
+            show_server=lambda id: mock_server)
+
+        fake_server = {'id': 'fake-uuid'}
+        fake_fip = {'floating_ip_address': 'fake_address'}
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_server_floating_ip, mock_client, fake_server,
+            fake_fip, wait_for_disassociate=True)
diff --git a/tempest/tests/lib/cmd/test_check_uuid.py b/tempest/tests/lib/cmd/test_check_uuid.py
index a621a75..edfb2c8 100644
--- a/tempest/tests/lib/cmd/test_check_uuid.py
+++ b/tempest/tests/lib/cmd/test_check_uuid.py
@@ -28,36 +28,33 @@
            "    def test_tests(self):\n" \
            "        pass"
 
-    def create_tests_file(self, directory):
-        with open(directory + "/__init__.py", "w"):
-            pass
+    def setUp(self):
+        super(TestCLInterface, self).setUp()
+        self.directory = tempfile.mkdtemp(prefix='check-uuid', dir=".")
+        self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True)
 
-        tests_file = directory + "/tests.py"
-        with open(tests_file, "w") as fake_file:
+        with open(self.directory + "/__init__.py", "w"):
+            pass
+
+        self.tests_file = self.directory + "/tests.py"
+        with open(self.tests_file, "w") as fake_file:
             fake_file.write(TestCLInterface.CODE)
-
-        return tests_file
 
     def test_fix_argument_no(self):
-        temp_dir = tempfile.mkdtemp(prefix='check-uuid-no', dir=".")
-        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
-        tests_file = self.create_tests_file(temp_dir)
         sys.argv = [sys.argv[0]] + ["--package",
-                                    os.path.relpath(temp_dir)]
+                                    os.path.relpath(self.directory)]
 
         self.assertRaises(SystemExit, check_uuid.run)
-        with open(tests_file, "r") as f:
+        with open(self.tests_file, "r") as f:
             self.assertTrue(TestCLInterface.CODE == f.read())
 
     def test_fix_argument_yes(self):
-        temp_dir = tempfile.mkdtemp(prefix='check-uuid-yes', dir=".")
-        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
-        tests_file = self.create_tests_file(temp_dir)
 
         sys.argv = [sys.argv[0]] + ["--fix", "--package",
-                                    os.path.relpath(temp_dir)]
+                                    os.path.relpath(self.directory)]
         check_uuid.run()
-        with open(tests_file, "r") as f:
+        with open(self.tests_file, "r") as f:
             self.assertTrue(TestCLInterface.CODE != f.read())
 
 
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index c5f6d7a..1dea5f5 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -144,11 +144,11 @@
                                                 extra_headers=True,
                                                 headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_get_update_headers(self):
@@ -156,11 +156,11 @@
                                                extra_headers=True,
                                                headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_delete_update_headers(self):
@@ -168,11 +168,11 @@
                                                   extra_headers=True,
                                                   headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_patch_update_headers(self):
@@ -180,11 +180,11 @@
                                                  extra_headers=True,
                                                  headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_put_update_headers(self):
@@ -192,11 +192,11 @@
                                                extra_headers=True,
                                                headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_head_update_headers(self):
@@ -207,11 +207,11 @@
                                                 extra_headers=True,
                                                 headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_copy_update_headers(self):
@@ -219,11 +219,11 @@
                                                 extra_headers=True,
                                                 headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
 
diff --git a/tempest/tests/lib/services/image/v2/test_schemas_client.py b/tempest/tests/lib/services/image/v2/test_schemas_client.py
index 4c4b86a..9fb249b 100644
--- a/tempest/tests/lib/services/image/v2/test_schemas_client.py
+++ b/tempest/tests/lib/services/image/v2/test_schemas_client.py
@@ -75,12 +75,323 @@
         }
     }
 
+    FAKE_SHOW_SCHEMA_IMAGE = {
+        "additionalProperties": {
+            "type": "string"
+        },
+        "links": [
+            {
+                "href": "{self}",
+                "rel": "self"
+            },
+            {
+                "href": "{file}",
+                "rel": "enclosure"
+            },
+            {
+                "href": "{schema}",
+                "rel": "describedby"
+            }
+        ],
+        "name": "image",
+        "properties": {
+            "architecture": {
+                "description": "Operating system architecture as "
+                               "specified in https://docs.openstack.org/"
+                               "python-glanceclient/latest/cli"
+                               "/property-keys.html",
+                "is_base": False,
+                "type": "string"
+            },
+            "checksum": {
+                "description": "md5 hash of image contents.",
+                "maxLength": 32,
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "container_format": {
+                "description": "Format of the container",
+                "enum": [
+                    None,
+                    "ami",
+                    "ari",
+                    "aki",
+                    "bare",
+                    "ovf",
+                    "ova",
+                    "docker"
+                ],
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "created_at": {
+                "description": "Date and time of image registration",
+                "readOnly": True,
+                "type": "string"
+            },
+            "direct_url": {
+                "description": "URL to access the image file "
+                               "kept in external store",
+                "readOnly": True,
+                "type": "string"
+            },
+            "disk_format": {
+                "description": "Format of the disk",
+                "enum": [
+                    None,
+                    "ami",
+                    "ari",
+                    "aki",
+                    "vhd",
+                    "vhdx",
+                    "vmdk",
+                    "raw",
+                    "qcow2",
+                    "vdi",
+                    "iso",
+                    "ploop"
+                ],
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "file": {
+                "description": "An image file url",
+                "readOnly": True,
+                "type": "string"
+            },
+            "id": {
+                "description": "An identifier for the image",
+                "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F])"
+                           "{4}-([0-9a-fA-F]){4}-([0-9a-fA-F])"
+                           "{4}-([0-9a-fA-F]){12}$",
+                "type": "string"
+            },
+            "instance_uuid": {
+                "description": "Metadata which can be used to record which"
+                               " instance this image is associated with. "
+                               "(Informational only, does not create "
+                               "an instance snapshot.)",
+                "is_base": False,
+                "type": "string"
+            },
+            "kernel_id": {
+                "description": "ID of image stored in Glance that should "
+                               "be used as the kernel when booting an "
+                               "AMI-style image.",
+                "is_base": False,
+                "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-"
+                           "([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-("
+                           "[0-9a-fA-F]){12}$",
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "locations": {
+                "description": "A set of URLs to access the image file "
+                               "kept in external store",
+                "items": {
+                    "properties": {
+                        "metadata": {
+                            "type": "object"
+                        },
+                        "url": {
+                            "maxLength": 255,
+                            "type": "string"
+                        }
+                    },
+                    "required": [
+                        "url",
+                        "metadata"
+                    ],
+                    "type": "object"
+                },
+                "type": "array"
+            },
+            "min_disk": {
+                "description": "Amount of disk space (in GB) "
+                               "required to boot image.",
+                "type": "integer"
+            },
+            "min_ram": {
+                "description": "Amount of ram (in MB) required "
+                               "to boot image.",
+                "type": "integer"
+            },
+            "name": {
+                "description": "Descriptive name for the image",
+                "maxLength": 255,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "os_distro": {
+                "description": "Common name of operating system distribution "
+                               "as specified in https://docs.openstack.org/"
+                               "python-glanceclient/latest/cli/"
+                               "property-keys.html",
+                "is_base": False,
+                "type": "string"
+            },
+            "os_hash_algo": {
+                "description": "Algorithm to calculate the os_hash_value",
+                "maxLength": 64,
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "os_hash_value": {
+                "description": "Hexdigest of the image contents "
+                               "using the algorithm specified by "
+                               "the os_hash_algo",
+                "maxLength": 128,
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "os_hidden": {
+                "description": "If true, image will not appear in default"
+                               " image list response.",
+                "type": "boolean"
+            },
+            "os_version": {
+                "description": "Operating system version as specified by "
+                               "the distributor",
+                "is_base": False,
+                "type": "string"
+            },
+            "owner": {
+                "description": "Owner of the image",
+                "maxLength": 255,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "protected": {
+                "description": "If true, image will not be deletable.",
+                "type": "boolean"
+            },
+            "ramdisk_id": {
+                "description": "ID of image stored in Glance that should"
+                               " be used as the ramdisk when booting an "
+                               "AMI-style image.",
+                "is_base": False,
+                "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F])"
+                           "{4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "schema": {
+                "description": "An image schema url",
+                "readOnly": True,
+                "type": "string"
+            },
+            "self": {
+                "description": "An image self url",
+                "readOnly": True,
+                "type": "string"
+            },
+            "size": {
+                "description": "Size of image file in bytes",
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "integer"
+                ]
+            },
+            "status": {
+                "description": "Status of the image",
+                "enum": [
+                    "queued",
+                    "saving",
+                    "active",
+                    "killed",
+                    "deleted",
+                    "pending_delete",
+                    "deactivated",
+                    "uploading",
+                    "importing"
+                ],
+                "readOnly": True,
+                "type": "string"
+            },
+            "tags": {
+                "description": "List of strings related to the image",
+                "items": {
+                    "maxLength": 255,
+                    "type": "string"
+                },
+                "type": "array"
+            },
+            "updated_at": {
+                "description": "Date and time of the last image modification",
+                "readOnly": True,
+                "type": "string"
+            },
+            "virtual_size": {
+                "description": "Virtual size of image in bytes",
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "integer"
+                ]
+            },
+            "visibility": {
+                "description": "Scope of image accessibility",
+                "enum": [
+                    "public",
+                    "private"
+                ],
+                "type": "string"
+            }
+        }
+    }
+
     def setUp(self):
         super(TestSchemasClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
         self.client = schemas_client.SchemasClient(fake_auth,
                                                    'image', 'regionOne')
 
+    def _test_show_schema_members(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_schema,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_SCHEMA,
+            bytes_body,
+            schema="members")
+
+    def _test_show_schema_image(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_schema,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_SCHEMA_IMAGE,
+            bytes_body,
+            schema="image")
+
+    def _test_show_schema_images(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_schema,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_SCHEMA_IMAGE,
+            bytes_body,
+            schema="images")
+
     def _test_show_schema(self, bytes_body=False):
         self.check_service_client_function(
             self.client.show_schema,
@@ -89,6 +400,24 @@
             bytes_body,
             schema="member")
 
+    def test_show_schema_members_with_str_body(self):
+        self._test_show_schema_members()
+
+    def test_show_schema_members_with_bytes_body(self):
+        self._test_show_schema_members(bytes_body=True)
+
+    def test_show_schema_image_with_str_body(self):
+        self._test_show_schema_image()
+
+    def test_show_schema_image_with_bytes_body(self):
+        self._test_show_schema_image(bytes_body=True)
+
+    def test_show_schema_images_with_str_body(self):
+        self._test_show_schema_images()
+
+    def test_show_schema_images_with_bytes_body(self):
+        self._test_show_schema_images(bytes_body=True)
+
     def test_show_schema_with_str_body(self):
         self._test_show_schema()
 
diff --git a/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py b/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py
new file mode 100644
index 0000000..ce068e9
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py
@@ -0,0 +1,156 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import floating_ips_port_forwarding_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestFloatingIpsPortForwardingClient(base.BaseServiceTest):
+
+    FAKE_PORT_FORWARDING_REQUEST = {
+
+        "port_forwarding": {
+            "protocol": "tcp",
+            "internal_ip_address": "10.0.0.11",
+            "internal_port": 25,
+            "internal_port_id": "1238be08-a2a8-4b8d-addf-fb5e2250e480",
+            "external_port": 2230,
+            "description": "Some description",
+            }
+
+        }
+
+    FAKE_PORT_FORWARDING_RESPONSE = {
+
+        "port_forwarding": {
+            "protocol": "tcp",
+            "internal_ip_address": "10.0.0.12",
+            "internal_port": 26,
+            "internal_port_id": "1238be08-a2a8-4b8d-addf-fb5e2250e480",
+            "external_port": 2130,
+            "description": "Some description",
+            "id": "825ade3c-9760-4880-8080-8fc2dbab9acc"
+        }
+    }
+
+    FAKE_PORT_FORWARDINGS = {
+        "port_forwardings": [
+            FAKE_PORT_FORWARDING_RESPONSE['port_forwarding']
+        ]
+    }
+
+    FAKE_FLOATINGIP_ID = "a6800594-5b7a-4105-8bfe-723b346ce866"
+
+    FAKE_PORT_FORWARDING_ID = "a7800594-5b7a-4105-8bfe-723b346ce866"
+
+    def setUp(self):
+        super(TestFloatingIpsPortForwardingClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.floating_ips_port_forwarding_client = \
+            floating_ips_port_forwarding_client.\
+            FloatingIpsPortForwardingClient(fake_auth,
+                                            "network",
+                                            "regionOne")
+
+    def _test_create_port_forwarding(self, bytes_body=False):
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.
+            create_port_forwarding,
+            "tempest.lib.common.rest_client.RestClient.post",
+            self.FAKE_PORT_FORWARDING_RESPONSE,
+            bytes_body,
+            201,
+            floatingip_id=self.FAKE_FLOATINGIP_ID,
+            **self.FAKE_PORT_FORWARDING_REQUEST)
+
+    def _test_list_port_forwardings(self, bytes_body=False):
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.
+            list_port_forwardings,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_PORT_FORWARDINGS,
+            bytes_body,
+            200,
+            floatingip_id=self.FAKE_FLOATINGIP_ID)
+
+    def _test_show_port_forwardings(self, bytes_body=False):
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.
+            show_port_forwarding,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_PORT_FORWARDING_RESPONSE,
+            bytes_body,
+            200,
+            floatingip_id=self.FAKE_FLOATINGIP_ID,
+            port_forwarding_id=self.FAKE_PORT_FORWARDING_ID)
+
+    def _test_delete_port_forwarding(self):
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.
+            delete_port_forwarding,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            floatingip_id=self.FAKE_FLOATINGIP_ID,
+            port_forwarding_id=self.FAKE_PORT_FORWARDING_ID)
+
+    def _test_update_port_forwarding(self, bytes_body=False):
+        update_kwargs = {
+            "internal_port": "27"
+        }
+
+        resp_body = {
+            "port_forwarding": copy.deepcopy(
+                self.FAKE_PORT_FORWARDING_RESPONSE['port_forwarding']
+            )
+        }
+        resp_body["port_forwarding"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.update_port_forwarding,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            floatingip_id=self.FAKE_FLOATINGIP_ID,
+            port_forwarding_id=self.FAKE_PORT_FORWARDING_ID,
+            **update_kwargs)
+
+    def test_list_port_forwardings_with_str_body(self):
+        self._test_list_port_forwardings()
+
+    def test_list_port_forwardings_with_bytes_body(self):
+        self._test_list_port_forwardings(bytes_body=True)
+
+    def test_show_port_forwardings_with_str_body(self):
+        self._test_show_port_forwardings()
+
+    def test_show_port_forwardings_with_bytes_body(self):
+        self._test_show_port_forwardings(bytes_body=True)
+
+    def test_create_port_forwarding_with_str_body(self):
+        self._test_create_port_forwarding()
+
+    def test_create_port_forwarding_with_bytes_body(self):
+        self._test_create_port_forwarding(bytes_body=True)
+
+    def test_update_port_forwarding_with_str_body(self):
+        self._test_update_port_forwarding()
+
+    def test_update_port_forwarding_with_bytes_body(self):
+        self._test_update_port_forwarding(bytes_body=True)
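+
+    def test_delete_port_forwarding(self):
+        # Exercise the delete helper defined above so the DELETE call is
+        # covered as well.
+        self._test_delete_port_forwarding()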
diff --git a/tempest/tests/lib/services/network/test_loggable_resource_client.py b/tempest/tests/lib/services/network/test_loggable_resource_client.py
new file mode 100644
index 0000000..232775b
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_loggable_resource_client.py
@@ -0,0 +1,53 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import loggable_resource_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLoggableResourceClient(base.BaseServiceTest):
+
+    FAKE_LOGS = {
+        "loggable_resources": [
+            {
+                "type": "security_group"
+            },
+            {
+                "type": "none"
+            }
+        ]
+    }
+
+    def setUp(self):
+        super(TestLoggableResourceClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.loggable_resource_client = \
+            loggable_resource_client.LoggableResourceClient(
+                fake_auth, "network", "regionOne")
+
+    def _test_list_loggable_resources(self, bytes_body=False):
+        self.check_service_client_function(
+            self.loggable_resource_client.list_loggable_resources,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_LOGS,
+            bytes_body,
+            200)
+
+    def test_list_loggable_resources_with_str_body(self):
+        self._test_list_loggable_resources()
+
+    def test_list_loggable_resources_with_bytes_body(self):
+        self._test_list_loggable_resources(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py b/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py
new file mode 100644
index 0000000..e83792d
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py
@@ -0,0 +1,124 @@
+# Copyright 2021 Red Hat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib import decorators
+
+from tempest.lib.services.network import qos_limit_bandwidth_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+from oslo_log import log as logging
+LOG = logging.getLogger('tempest')
+
+
+class TestQosLimitBandwidthRulesClient(base.BaseServiceTest):
+
+    FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+    FAKE_MAX_BW_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
+
+    FAKE_MAX_BW_RULE_REQUEST = {
+        'qos_policy_id': FAKE_QOS_POLICY_ID,
+        'max_kbps': 1000,
+        'max_burst_kbps': 0,
+        'direction': 'ingress'
+    }
+
+    FAKE_MAX_BW_RULE_RESPONSE = {
+        'bandwidth_limit_rule': {
+            'id': FAKE_MAX_BW_RULE_ID,
+            'max_kbps': 10000,
+            'max_burst_kbps': 0,
+            'direction': 'ingress'
+        }
+    }
+
+    FAKE_MAX_BW_RULES = {
+        'bandwidth_limit_rules': [
+            FAKE_MAX_BW_RULE_RESPONSE['bandwidth_limit_rule']
+        ]
+    }
+
+    def setUp(self):
+        super(TestQosLimitBandwidthRulesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.qos_limit_bw_client = qos_limit_bandwidth_rules_client.\
+            QosLimitBandwidthRulesClient(fake_auth, "network", "regionOne")
+
+    @decorators.idempotent_id('cde981fa-e93b-11eb-aacb-74e5f9e2a801')
+    def test_create_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.create_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.post",
+            self.FAKE_MAX_BW_RULE_RESPONSE,
+            bytes_body,
+            201,
+            **self.FAKE_MAX_BW_RULE_REQUEST
+        )
+
+    @decorators.idempotent_id('86e6803a-e974-11eb-aacb-74e5f9e2a801')
+    def test_update_limit_bandwidth_rules(self, bytes_body=False):
+        update_kwargs = {
+            "max_kbps": "20000"
+        }
+
+        resp_body = {
+            "bandwidth_limit_rule": copy.deepcopy(
+                self.FAKE_MAX_BW_RULE_RESPONSE['bandwidth_limit_rule']
+            )
+        }
+        resp_body["bandwidth_limit_rule"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.qos_limit_bw_client.update_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID,
+            **update_kwargs)
+
+    @decorators.idempotent_id('be60ae6e-e979-11eb-aacb-74e5f9e2a801')
+    def test_show_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.show_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MAX_BW_RULE_RESPONSE,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID
+        )
+
+    @decorators.idempotent_id('0a7c0964-e97b-11eb-aacb-74e5f9e2a801')
+    def test_delete_limit_bandwidth_rule(self):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.delete_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID)
+
+    @decorators.idempotent_id('08df88ae-e97d-11eb-aacb-74e5f9e2a801')
+    def test_list_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.list_limit_bandwidth_rules,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MAX_BW_RULES,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID
+        )
diff --git a/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py b/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py
new file mode 100644
index 0000000..3cc3de3
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py
@@ -0,0 +1,135 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import qos_minimum_packet_rate_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestQosMinimumPacketRateRulesClient(base.BaseServiceTest):
+
+    FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+    FAKE_MIN_PPS_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
+
+    FAKE_MIN_PPS_RULE_REQUEST = {
+        'qos_policy_id': FAKE_QOS_POLICY_ID,
+        'min_kpps': 1000,
+        'direction': 'ingress'
+    }
+
+    FAKE_MIN_PPS_RULE_RESPONSE = {
+        'minimum_packet_rate_rule': {
+            'id': FAKE_MIN_PPS_RULE_ID,
+            'min_kpps': 1000,
+            'direction': 'ingress'
+        }
+    }
+
+    FAKE_MIN_PPS_RULES = {
+        'minimum_packet_rate_rules': [
+            FAKE_MIN_PPS_RULE_RESPONSE['minimum_packet_rate_rule']
+        ]
+    }
+
+    def setUp(self):
+        super(TestQosMinimumPacketRateRulesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.qos_min_pps_client = qos_minimum_packet_rate_rules_client.\
+            QosMinimumPacketRateRulesClient(fake_auth, "network", "regionOne")
+
+    def _test_create_minimum_packet_rate_rule(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.create_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.post",
+            self.FAKE_MIN_PPS_RULE_RESPONSE,
+            bytes_body,
+            201,
+            **self.FAKE_MIN_PPS_RULE_REQUEST
+        )
+
+    def _test_list_minimum_packet_rate_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.list_minimum_packet_rate_rules,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MIN_PPS_RULES,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID
+        )
+
+    def _test_show_minimum_packet_rate_rule(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.show_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MIN_PPS_RULE_RESPONSE,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID
+        )
+
+    def _test_update_minimum_packet_rate_rule(self, bytes_body=False):
+        update_kwargs = {
+            "min_kpps": "20000"
+        }
+
+        resp_body = {
+            "minimum_packet_rate_rule": copy.deepcopy(
+                self.FAKE_MIN_PPS_RULE_RESPONSE['minimum_packet_rate_rule']
+            )
+        }
+        resp_body["minimum_packet_rate_rule"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.qos_min_pps_client.update_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID,
+            **update_kwargs)
+
+    def test_create_minimum_packet_rate_rule_with_str_body(self):
+        self._test_create_minimum_packet_rate_rule()
+
+    def test_create_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_create_minimum_packet_rate_rule(bytes_body=True)
+
+    def test_update_minimum_packet_rate_rule_with_str_body(self):
+        self._test_update_minimum_packet_rate_rule()
+
+    def test_update_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_update_minimum_packet_rate_rule(bytes_body=True)
+
+    def test_show_minimum_packet_rate_rule_with_str_body(self):
+        self._test_show_minimum_packet_rate_rule()
+
+    def test_show_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_show_minimum_packet_rate_rule(bytes_body=True)
+
+    def test_delete_minimum_packet_rate_rule(self):
+        self.check_service_client_function(
+            self.qos_min_pps_client.delete_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID)
+
+    def test_list_minimum_packet_rate_rule_with_str_body(self):
+        self._test_list_minimum_packet_rate_rules()
+
+    def test_list_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_list_minimum_packet_rate_rules(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_routers_client.py b/tempest/tests/lib/services/network/test_routers_client.py
index f5dcc7d..20b6853 100644
--- a/tempest/tests/lib/services/network/test_routers_client.py
+++ b/tempest/tests/lib/services/network/test_routers_client.py
@@ -95,6 +95,67 @@
         }
     }
 
+    FAKE_ROUTER_ID = "f8a44de0-fc8e-45df-93c7-f79bf3b01c95"
+    FAKE_INTERFACE = {
+        "id": "915a14a6-867b-4af7-83d1-70efceb146f9",
+        "network_id": "91c013e2-d65a-474e-9177-c3e1799ca726",
+        "port_id": "2dc46bcc-d1f2-4077-b99e-91ee28afaff0",
+        "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1",
+        "subnet_ids": [
+            "a2f1f29d-571b-4533-907f-5803ab96ead1"
+        ],
+        "project_id": "0bd18306d801447bb457a46252d82d13",
+        "tenant_id": "0bd18306d801447bb457a46252d82d13",
+        "tags": ["tag1,tag2"]
+    }
+    FAKE_INTERFACE_KWARGS = {
+        "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1"
+    }
+    FAKE_SHOW_ROUTER = {
+        "router": {
+            "admin_state_up": "true",
+            "availability_zone_hints": [],
+            "availability_zones": [
+                "nova"
+            ],
+            "created_at": "2018-03-19T19:17:04Z",
+            "description": "",
+            "distributed": "false",
+            "external_gateway_info": {
+                "enable_snat": "true",
+                "external_fixed_ips": [
+                    {
+                        "ip_address": "172.24.4.6",
+                        "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
+                    },
+                    {
+                        "ip_address": "2001:db8::9",
+                        "subnet_id": "0c56df5d-ace5-46c8-8f4c-45fa4e334d18"
+                    }
+                ],
+                "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
+            },
+            "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+            "ha": "false",
+            "id": "f8a44de0-fc8e-45df-93c7-f79bf3b01c95",
+            "name": "router1",
+            "revision_number": 1,
+            "routes": [
+                {
+                    "destination": "179.24.1.0/24",
+                    "nexthop": "172.24.3.99"
+                }
+            ],
+            "status": "ACTIVE",
+            "updated_at": "2018-03-19T19:17:22Z",
+            "project_id": "0bd18306d801447bb457a46252d82d13",
+            "tenant_id": "0bd18306d801447bb457a46252d82d13",
+            "service_type_id": "null",
+            "tags": ["tag1,tag2"],
+            "conntrack_helpers": []
+        }
+    }
+
     def setUp(self):
         super(TestRoutersClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -116,6 +177,15 @@
             bytes_body,
             name="another_router", admin_state_up="true", status=201)
 
+    def _test_show_router(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_router,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_SHOW_ROUTER,
+            bytes_body,
+            200,
+            router_id=self.FAKE_ROUTER_ID)
+
     def _test_update_router(self, bytes_body=False):
         self.check_service_client_function(
             self.client.update_router,
@@ -125,6 +195,24 @@
             router_id="8604a0de-7f6b-409a-a47c-a1cc7bc77b2e",
             admin_state_up=False)
 
+    def _test_add_router_interface(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.add_router_interface,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_INTERFACE,
+            bytes_body,
+            router_id=self.FAKE_ROUTER_ID,
+            **self.FAKE_INTERFACE_KWARGS)
+
+    def _test_remove_router_interface(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.remove_router_interface,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_INTERFACE,
+            bytes_body,
+            router_id=self.FAKE_ROUTER_ID,
+            **self.FAKE_INTERFACE_KWARGS)
+
     def test_list_routers_with_str_body(self):
         self._test_list_routers()
 
@@ -148,3 +236,21 @@
 
     def test_update_router_with_bytes_body(self):
         self._test_update_router(bytes_body=True)
+
+    def test_show_router_with_str_body(self):
+        self._test_show_router()
+
+    def test_show_router_with_bytes_body(self):
+        self._test_show_router(bytes_body=True)
+
+    def test_add_router_interface_with_str_body(self):
+        self._test_add_router_interface()
+
+    def test_add_router_interface_with_bytes_body(self):
+        self._test_add_router_interface(bytes_body=True)
+
+    def test_remove_router_interface_with_str_body(self):
+        self._test_remove_router_interface()
+
+    def test_remove_router_interface_with_bytes_body(self):
+        self._test_remove_router_interface(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_subnetpools_client.py b/tempest/tests/lib/services/network/test_subnetpools_client.py
index 3abb438..2dfa25e 100644
--- a/tempest/tests/lib/services/network/test_subnetpools_client.py
+++ b/tempest/tests/lib/services/network/test_subnetpools_client.py
@@ -26,13 +26,13 @@
         "subnetpools": [
             {
                 "min_prefixlen": "64",
-                "address_scope_id": None,
+                "address_scope_id": "null",
                 "default_prefixlen": "64",
                 "id": "03f761e6-eee0-43fc-a921-8acf64c14988",
                 "max_prefixlen": "64",
                 "name": "my-subnet-pool-ipv6",
-                "default_quota": None,
-                "is_default": False,
+                "default_quota": "null",
+                "is_default": "false",
                 "project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
                 "tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
                 "prefixes": [
@@ -40,19 +40,22 @@
                     "2001:db8::/63"
                 ],
                 "ip_version": 6,
-                "shared": False,
+                "shared": "false",
                 "description": "",
-                "revision_number": 2
+                "created_at": "2016-03-08T20:19:41",
+                "updated_at": "2016-03-08T20:19:41",
+                "revision_number": 2,
+                "tags": ["tag1,tag2"]
             },
             {
                 "min_prefixlen": "24",
-                "address_scope_id": None,
+                "address_scope_id": "null",
                 "default_prefixlen": "25",
                 "id": "f49a1319-423a-4ee6-ba54-1d95a4f6cc68",
                 "max_prefixlen": "30",
                 "name": "my-subnet-pool-ipv4",
-                "default_quota": None,
-                "is_default": False,
+                "default_quota": "null",
+                "is_default": "false",
                 "project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
                 "tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
                 "prefixes": [
@@ -60,9 +63,12 @@
                     "192.168.0.0/16"
                 ],
                 "ip_version": 4,
-                "shared": False,
+                "shared": "false",
                 "description": "",
-                "revision_number": 2
+                "created_at": "2016-03-08T20:19:41",
+                "updated_at": "2016-03-08T20:19:41",
+                "revision_number": 2,
+                "tags": ["tag1,tag2"]
             }
         ]
     }
diff --git a/tempest/tests/lib/test_base.py b/tempest/tests/lib/test_base.py
index 2c16e1c..de6021c 100644
--- a/tempest/tests/lib/test_base.py
+++ b/tempest/tests/lib/test_base.py
@@ -48,7 +48,7 @@
     @classmethod
     def setUpClass(cls):  # noqa
         """Simulate absence of super() call."""
-        cls.orig_skip_exception = cls.skipException
+        pass
 
     def setUp(self):
         try:
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 7c31185..464e66a 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -240,3 +240,9 @@
             with_other_decorators=True,
             with_negative_decorator=False,
             expected_success=False)
+
+    def test_no_log_warn(self):
+        self.assertFalse(list(checks.no_log_warn(
+            'LOG.warning("LOG.warn is deprecated")')))
+        self.assertTrue(list(checks.no_log_warn(
+            'LOG.warn("LOG.warn is deprecated")')))
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
index 9aeedb3..cbb81e2 100644
--- a/tempest/tests/test_test.py
+++ b/tempest/tests/test_test.py
@@ -14,7 +14,7 @@
 #    under the License.
 
 import os
-import sys
+import unittest
 from unittest import mock
 
 from oslo_config import cfg
@@ -24,6 +24,9 @@
 from tempest import config
 from tempest.lib.common import validation_resources as vr
 from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.compute import base_compute_client
+from tempest.lib.services.placement import base_placement_client
+from tempest.lib.services.volume import base_client as base_volume_client
 from tempest import test
 from tempest.tests import base
 from tempest.tests import fake_config
@@ -31,12 +34,6 @@
 from tempest.tests.lib.services import registry_fixture
 
 
-if sys.version_info >= (2, 7):
-    import unittest
-else:
-    import unittest2 as unittest
-
-
 class LoggingTestResult(testtools.TestResult):
 
     def __init__(self, log, *args, **kwargs):
@@ -749,3 +746,186 @@
                          self.test.fixtures_invoked)
         found_exc = log[0][1][1]
         self.assertIn(expected_exc, str(found_exc))
+
+
+class TestAPIMicroversionTest1(test.BaseTestCase):
+
+    @classmethod
+    def resource_setup(cls):
+        super(TestAPIMicroversionTest1, cls).resource_setup()
+        # Setting microversions and checking that every test
+        # of this class will have those microversions set
+        # on service clients requesting service APIs.
+        cls.setup_api_microversion_fixture(
+            compute_microversion='2.30',
+            volume_microversion='3.10',
+            placement_microversion='3.1')
+        # Check microversion is set during resource_setup()
+        if base_compute_client.COMPUTE_MICROVERSION != '2.30':
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+        if base_volume_client.VOLUME_MICROVERSION != '3.10':
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+        if base_placement_client.PLACEMENT_MICROVERSION != '3.1':
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+
+    @classmethod
+    def resource_cleanup(cls):
+        super(TestAPIMicroversionTest1, cls).resource_cleanup()
+        # Check microversion is reset back to None in resource_cleanup()
+        if base_compute_client.COMPUTE_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+        if base_volume_client.VOLUME_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+        if base_placement_client.PLACEMENT_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+
+    def setUp(self):
+        super(TestAPIMicroversionTest1, self).setUp()
+        # Check microversion is set in setUp method also.
+        self.assertEqual('2.30', base_compute_client.COMPUTE_MICROVERSION)
+        self.assertEqual('3.10', base_volume_client.VOLUME_MICROVERSION)
+        self.assertEqual('3.1', base_placement_client.PLACEMENT_MICROVERSION)
+
+    def tearDown(self):
+        super(TestAPIMicroversionTest1, self).tearDown()
+        # Check microversion is set in tearDown method also.
+        self.assertEqual('2.30', base_compute_client.COMPUTE_MICROVERSION)
+        self.assertEqual('3.10', base_volume_client.VOLUME_MICROVERSION)
+        self.assertEqual('3.1', base_placement_client.PLACEMENT_MICROVERSION)
+
+    def test_1(self):
+        self.assertEqual('2.30', base_compute_client.COMPUTE_MICROVERSION)
+        self.assertEqual('3.10', base_volume_client.VOLUME_MICROVERSION)
+        self.assertEqual('3.1', base_placement_client.PLACEMENT_MICROVERSION)
+
+    def test_2(self):
+        self.assertEqual('2.30', base_compute_client.COMPUTE_MICROVERSION)
+        self.assertEqual('3.10', base_volume_client.VOLUME_MICROVERSION)
+        self.assertEqual('3.1', base_placement_client.PLACEMENT_MICROVERSION)
+
+
+class TestAPIMicroversionTest2(test.BaseTestCase):
+
+    @classmethod
+    def resource_setup(cls):
+        super(TestAPIMicroversionTest2, cls).resource_setup()
+        # Setting microversions different from those set in
+        # TestAPIMicroversionTest1 and checking that every test
+        # of this class will have the new microversions set
+        # on service clients requesting service APIs.
+        cls.setup_api_microversion_fixture(
+            compute_microversion='2.80',
+            volume_microversion='3.80',
+            placement_microversion='3.8')
+        # Check microversion is set during resource_setup()
+        if base_compute_client.COMPUTE_MICROVERSION != '2.80':
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+        if base_volume_client.VOLUME_MICROVERSION != '3.80':
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+        if base_placement_client.PLACEMENT_MICROVERSION != '3.8':
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+
+    @classmethod
+    def resource_cleanup(cls):
+        super(TestAPIMicroversionTest2, cls).resource_cleanup()
+        # Check microversion is reset back to None in resource_cleanup()
+        if base_compute_client.COMPUTE_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+        if base_volume_client.VOLUME_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+        if base_placement_client.PLACEMENT_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+
+    def setUp(self):
+        super(TestAPIMicroversionTest2, self).setUp()
+        # Check microversion is set in setUp method also.
+        self.assertEqual('2.80', base_compute_client.COMPUTE_MICROVERSION)
+        self.assertEqual('3.80', base_volume_client.VOLUME_MICROVERSION)
+        self.assertEqual('3.8', base_placement_client.PLACEMENT_MICROVERSION)
+
+    def tearDown(self):
+        super(TestAPIMicroversionTest2, self).tearDown()
+        # Check microversion is set in tearDown method also.
+        self.assertEqual('2.80', base_compute_client.COMPUTE_MICROVERSION)
+        self.assertEqual('3.80', base_volume_client.VOLUME_MICROVERSION)
+        self.assertEqual('3.8', base_placement_client.PLACEMENT_MICROVERSION)
+
+    def test_1(self):
+        self.assertEqual('2.80', base_compute_client.COMPUTE_MICROVERSION)
+        self.assertEqual('3.80', base_volume_client.VOLUME_MICROVERSION)
+        self.assertEqual('3.8', base_placement_client.PLACEMENT_MICROVERSION)
+
+    def test_2(self):
+        self.assertEqual('2.80', base_compute_client.COMPUTE_MICROVERSION)
+        self.assertEqual('3.80', base_volume_client.VOLUME_MICROVERSION)
+        self.assertEqual('3.8', base_placement_client.PLACEMENT_MICROVERSION)
+
+
+class TestAPIMicroversionTest3(test.BaseTestCase):
+
+    @classmethod
+    def resource_setup(cls):
+        super(TestAPIMicroversionTest3, cls).resource_setup()
+        # Not setting any microversion for this test class, so
+        # there should not be any microversion set on service
+        # clients requesting service APIs.
+        # Check microversion is not set during resource_setup()
+        if base_compute_client.COMPUTE_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+        if base_volume_client.VOLUME_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+        if base_placement_client.PLACEMENT_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not set in resource_setup method")
+
+    @classmethod
+    def resource_cleanup(cls):
+        super(TestAPIMicroversionTest3, cls).resource_cleanup()
+        # Check microversion is set to None in resource_cleanup()
+        if base_compute_client.COMPUTE_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+        if base_volume_client.VOLUME_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+        if base_placement_client.PLACEMENT_MICROVERSION is not None:
+            raise testtools.TestCase.failureException(
+                "Microversion is not reset to None in resource_cleanup method")
+
+    def setUp(self):
+        super(TestAPIMicroversionTest3, self).setUp()
+        # Check microversion is None in setUp method also.
+        self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+        self.assertIsNone(base_volume_client.VOLUME_MICROVERSION)
+        self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+    def tearDown(self):
+        super(TestAPIMicroversionTest3, self).tearDown()
+        # Check microversion is None in tearDown method also.
+        self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+        self.assertIsNone(base_volume_client.VOLUME_MICROVERSION)
+        self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+    def test_1(self):
+        self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+        self.assertIsNone(base_volume_client.VOLUME_MICROVERSION)
+        self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+    def test_2(self):
+        self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+        self.assertIsNone(base_volume_client.VOLUME_MICROVERSION)
+        self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 1b5b369..b96bbe4 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -35,28 +35,48 @@
 # TODO(masayukig): Some of these can be removed from NON_ACTIVE_LIST in the
 # future when the patches are merged.
 NON_ACTIVE_LIST = [
-    'x/gce-api',  # It looks gce-api doesn't support python3 yet.
+    'x/gce-api',  # It looks like gce-api doesn't support python3 yet
+    # https://bugs.launchpad.net/gce-api/+bug/1931094
     'x/glare',  # To avoid sanity-job failure
-    'x/group-based-policy',  # It looks this doesn't support python3 yet.
-    'x/intel-nfv-ci-tests',  # https://review.opendev.org/#/c/634640/
+    'x/group-based-policy',
+    # https://bugs.launchpad.net/group-based-policy/+bug/1931091
+    'x/intel-nfv-ci-tests',  # To avoid sanity-job failure
     'openstack/networking-generic-switch',
+    # This is not a real tempest plugin,
     # https://review.opendev.org/#/c/634846/
-    'x/networking-l2gw-tempest-plugin',
-    # https://review.opendev.org/#/c/635093/
-    'openstack/networking-midonet',  # https://review.opendev.org/#/c/635096/
-    'x/networking-plumgrid',  # https://review.opendev.org/#/c/635096/
+    'x/networking-plumgrid',  # No longer contains tempest tests
     'x/networking-spp',  # https://review.opendev.org/#/c/635098/
+    # networking-spp is missing neutron-tempest-plugin as a dep, plus
+    # test-requirements.txt is nested in an openstack dir and the sanity
+    # script doesn't handle such a scenario yet
     'openstack/neutron-dynamic-routing',
+    # As tests have been migrated to neutron-tempest-plugin:
     # https://review.opendev.org/#/c/637718/
-    'openstack/neutron-vpnaas',  # https://review.opendev.org/#/c/637719/
-    'x/tap-as-a-service',  # To avoid sanity-job failure
-    'x/valet',  # https://review.opendev.org/#/c/638339/
-    'x/kingbird',  # https://bugs.launchpad.net/kingbird/+bug/1869722
-    # vmware-nsx is excluded since https://review.opendev.org/#/c/736952
-    'x/vmware-nsx-tempest-plugin',
+    'openstack/neutron-vpnaas',
+    # As tests have been migrated to neutron-tempest-plugin:
+    # https://review.opendev.org/c/openstack/neutron-vpnaas/+/695834
+    'x/valet',  # valet is unmaintained now
+    # https://review.opendev.org/c/x/valet/+/638339
+    'x/kingbird',  # kingbird is unmaintained now
+    # https://bugs.launchpad.net/kingbird/+bug/1869722
+    'x/mogan',
     # mogan is unmaintained now, remove from the list when this is merged:
     # https://review.opendev.org/c/x/mogan/+/767718
-    'x/mogan',
+    'x/vmware-nsx-tempest-plugin',
+    # Failing since 2021-08-27
+    # https://zuul.opendev.org/t/openstack/build
+    # /45f6c8d3c62d4387a70b7b471ec687c8
+    # Below plugins failing for error in psycopg2 __init__
+    # ImportError: libpq.so.5: cannot open shared object
+    # file: No such file or directory
+    # https://zuul.opendev.org/t/openstack/build
+    # /b61a48196dfa476d83645aea4853e544/log/job-output.txt#271722
+    # Failing since 2021-09-08
+    'x/networking-l2gw-tempest-plugin',
+    'x/novajoin-tempest-plugin',
+    'x/ranger-tempest-plugin',
+    'x/tap-as-a-service-tempest-plugin',
+    'x/trio2o',
 ]
 
 url = 'https://review.opendev.org/projects/'
diff --git a/tox.ini b/tox.ini
index cd32174..b07fdaf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = pep8,py36,py38,bashate,pip-check-reqs
+envlist = pep8,py36,py39,bashate,pip-check-reqs
 minversion = 3.18.0
 skipsdist = True
 ignore_basepython_conflict = True
@@ -10,6 +10,7 @@
 setenv =
     VIRTUAL_ENV={envdir}
     OS_TEST_PATH=./tempest/test_discover
+    OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
 deps =
     -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
@@ -61,7 +62,6 @@
 # 'all' includes slow tests
 setenv =
     {[tempestenv]setenv}
-    OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
 deps = {[tempestenv]deps}
 commands =
     find . -type f -name "*.pyc" -delete
@@ -79,7 +79,6 @@
 # 'all' includes slow tests
 setenv =
     {[tempestenv]setenv}
-    OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
 basepython = {[tempestenv]basepython}
 deps = {[tempestenv]deps}
 commands =
@@ -93,7 +92,6 @@
 # 'all' includes slow tests
 setenv =
     {[tempestenv]setenv}
-    OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
 basepython = {[tempestenv]basepython}
 deps = {[tempestenv]deps}
 commands =
@@ -125,6 +123,18 @@
     find . -type f -name "*.pyc" -delete
     tempest run --regex '(^tempest\.scenario.*)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)' {posargs}
 
+[testenv:api-microversion-tests]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select all tempest api tests for services that
+# have the API microversion concept.
+commands =
+    find . -type f -name "*.pyc" -delete
+    tempest run --regex '(^tempest\.api\.compute)|(^tempest\.api\.volume)' {posargs}
+
 [testenv:integrated-network]
 envdir = .tox/tempest
 sitepackages = {[tempestenv]sitepackages}
@@ -335,7 +345,8 @@
 # E123 skipped because it is ignored by default in the default pep8
 # E129 skipped because it is too limiting when combined with other rules
 # W504 skipped because it is overeager and unnecessary
-ignore = E125,E123,E129,W504
+# H405 skipped because it arbitrarily forces docstring "title" lines
+ignore = E125,E123,E129,W504,H405
 show-source = True
 exclude = .git,.venv,.tox,dist,doc,*egg,build
 enable-extensions = H106,H203,H904
@@ -358,6 +369,7 @@
   T115 = checks:dont_put_admin_tests_on_nonadmin_path
   T116 = checks:unsupported_exception_attribute_PY3
   T117 = checks:negative_test_attribute_always_applied_to_negative_tests
+  T118 = checks:no_log_warn
 paths =
   ./tempest/hacking
 
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 2da5579..d35e25d 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -82,21 +82,9 @@
         GLANCE_USE_IMPORT_WORKFLOW: True
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
       devstack_services:
         # Enbale horizon so that we can run horizon test.
         horizon: true
-        neutron-placement: true
-        neutron-qos: true
 
 - job:
     name: tempest-integrated-networking
@@ -141,6 +129,23 @@
         c-bak: false
 
 - job:
+    name: tempest-integrated-compute-centos-8-stream
+    parent: tempest-integrated-compute
+    # TODO(gmann): Make this job non-voting until bug#1957941 is fixed.
+    voting: false
+    nodeset: devstack-single-node-centos-8-stream
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria)).*$
+    description: |
+      This job runs integration tests for compute. This is a
+      subset of the 'tempest-full-py3' job and runs Nova, Neutron, Cinder
+      (except backup tests) and Glance related tests. It is meant to be run
+      on the Nova gate only. This version of the job also uses CentOS 8 stream.
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
+
+- job:
     name: tempest-integrated-placement
     parent: devstack-tempest
     branches: ^(?!stable/ocata).*$
@@ -194,6 +199,20 @@
         USE_PYTHON3: true
 
 - job:
+    name: tempest-with-latest-microversion
+    parent: tempest-full-py3
+    description: |
+      This job runs compute, placement and volume API tests with the 'latest'
+      API microversion (this can be extended to other services that have the
+      API microversion concept).
+    vars:
+      tox_envlist: api-microversion-tests
+      devstack_localrc:
+        TEMPEST_COMPUTE_MIN_MICROVERSION: 'latest'
+        TEMPEST_VOLUME_MIN_MICROVERSION: 'latest'
+        TEMPEST_PLACEMENT_MIN_MICROVERSION: 'latest'
+
+- job:
     name: tempest-multinode-full
     parent: tempest-multinode-full-base
     nodeset: openstack-two-node-focal
@@ -215,30 +234,12 @@
         USE_PYTHON3: true
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
       devstack_services:
-        neutron-placement: true
-        neutron-qos: true
         neutron-trunk: true
     group-vars:
       subnode:
         devstack_localrc:
           USE_PYTHON3: true
-        devstack_local_conf:
-          post-config:
-            "/$NEUTRON_CORE_PLUGIN_CONF":
-              ovs:
-                bridge_mappings: public:br-ex
-                resource_provider_bandwidths: br-ex:1000000:1000000
 
 - job:
     name: tempest-slow
@@ -297,6 +298,22 @@
         TEMPEST_VOLUME_TYPE: volumev2
 
 - job:
+    name: tempest-centos8-stream-fips
+    parent: devstack-tempest
+    description: |
+      Integration testing for a FIPS-enabled CentOS 8 system
+    nodeset: devstack-single-node-centos-8-stream
+    pre-run: playbooks/enable-fips.yaml
+    vars:
+      tox_envlist: full
+      configure_swap_size: 4096
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            validation:
+              ssh_key_type: 'ecdsa'
+
+- job:
     name: tempest-pg-full
     parent: tempest-full-py3
     description: |
@@ -318,11 +335,15 @@
     check:
       jobs:
         - grenade
+        - grenade-skip-level:
+            voting: false
         - tempest-integrated-networking
+        - openstacksdk-functional-devstack
     gate:
       jobs:
         - grenade
         - tempest-integrated-networking
+        - openstacksdk-functional-devstack
 
 - project-template:
     name: integrated-gate-compute
@@ -333,10 +354,16 @@
       run on Nova gate only.
     check:
       jobs:
+        - grenade-skip-level:
+            voting: false
         - tempest-integrated-compute
+        - tempest-integrated-compute-centos-8-stream
+        - openstacksdk-functional-devstack
     gate:
       jobs:
         - tempest-integrated-compute
+        - tempest-integrated-compute-centos-8-stream
+        - openstacksdk-functional-devstack
 
 - project-template:
     name: integrated-gate-placement
@@ -348,11 +375,15 @@
     check:
       jobs:
         - grenade
+        - grenade-skip-level:
+            voting: false
         - tempest-integrated-placement
+        - openstacksdk-functional-devstack
     gate:
       jobs:
         - grenade
         - tempest-integrated-placement
+        - openstacksdk-functional-devstack
 
 - project-template:
     name: integrated-gate-storage
@@ -364,11 +395,15 @@
     check:
       jobs:
         - grenade
+        - grenade-skip-level:
+            voting: false
         - tempest-integrated-storage
+        - openstacksdk-functional-devstack
     gate:
       jobs:
         - grenade
         - tempest-integrated-storage
+        - openstacksdk-functional-devstack
 
 - project-template:
     name: integrated-gate-object-storage
@@ -381,7 +416,9 @@
       jobs:
         - grenade
         - tempest-integrated-object-storage
+        - openstacksdk-functional-devstack
     gate:
       jobs:
         - grenade
         - tempest-integrated-object-storage
+        - openstacksdk-functional-devstack
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 7a3afc2..e62f24a 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -3,11 +3,15 @@
       - check-requirements
       - integrated-gate-py3
       - openstack-cover-jobs
-      - openstack-python3-xena-jobs
       - publish-openstack-docs-pti
       - release-notes-jobs-python3
     check:
       jobs:
+        - openstack-tox-pep8
+        - openstack-tox-py36
+        - openstack-tox-py37
+        - openstack-tox-py38
+        - openstack-tox-py39
         - tempest-full-parallel:
             # Define list of irrelevant files to use everywhere else
             irrelevant-files: &tempest-irrelevant-files
@@ -31,14 +35,14 @@
         - glance-multistore-cinder-import:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-xena:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-wallaby-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-victoria-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-ussuri-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-train-py3:
-            irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-tox-plugin-sanity-check:
@@ -54,9 +58,44 @@
               - ^.gitignore$
               - ^.gitreview$
               - ^.mailmap$
+              - ^tools/check_logs.py
+              - ^tools/format.sh
+              - ^tools/skip_tracker.py
+              - ^tools/tempest-integrated-gate-compute-exclude-list.txt
+              - ^tools/tempest-integrated-gate-networking-exclude-list.txt
+              - ^tools/tempest-integrated-gate-object-storage-exclude-list.txt
+              - ^tools/tempest-integrated-gate-placement-exclude-list.txt
+              - ^tools/tempest-integrated-gate-storage-blacklist.txt
+              - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+              - ^tools/verify-ipv6-only-deployments.sh
+              - ^tools/with_venv.sh
               # tools/ is not here since this relies on a script in tools/.
         - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
+            irrelevant-files: &tempest-irrelevant-files-3
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^tools/check_logs.py
+              - ^tools/format.sh
+              - ^tools/generate-tempest-plugins-list.py
+              - ^tools/generate-tempest-plugins-list.sh
+              - ^tools/skip_tracker.py
+              - ^tools/tempest-integrated-gate-compute-exclude-list.txt
+              - ^tools/tempest-integrated-gate-networking-exclude-list.txt
+              - ^tools/tempest-integrated-gate-object-storage-exclude-list.txt
+              - ^tools/tempest-integrated-gate-placement-exclude-list.txt
+              - ^tools/tempest-integrated-gate-storage-blacklist.txt
+              - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+              - ^tools/tempest-plugin-sanity.sh
+              - ^tools/with_venv.sh
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
         - nova-live-migration:
@@ -81,22 +120,34 @@
             irrelevant-files: *tempest-irrelevant-files
         - openstack-tox-bashate:
             irrelevant-files: *tempest-irrelevant-files-2
+        - tempest-full-py3-centos-8-stream:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-centos-9-stream:
+            irrelevant-files: *tempest-irrelevant-files
     gate:
       jobs:
+        - openstack-tox-pep8
+        - openstack-tox-py36
+        - openstack-tox-py37
+        - openstack-tox-py38
+        - openstack-tox-py39
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-centos-8-stream:
+            irrelevant-files: *tempest-irrelevant-files
         - grenade:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
+            irrelevant-files: *tempest-irrelevant-files-3
         - devstack-plugin-ceph-tempest-py3:
             irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
+        - tempest-with-latest-microversion
         - tempest-stestr-master
         - tempest-cinder-v2-api:
             irrelevant-files: *tempest-irrelevant-files
@@ -110,14 +161,14 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-pg-full:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3-opensuse15:
+        - tempest-centos8-stream-fips:
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-xena
         - tempest-full-wallaby-py3
         - tempest-full-victoria-py3
         - tempest-full-ussuri-py3
-        - tempest-full-train-py3
     periodic:
       jobs:
         - tempest-all
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 852bafb..5cc0dd0 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,10 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-xena
+    parent: tempest-full-py3
+    override-checkout: stable/xena
+
+- job:
     name: tempest-full-wallaby-py3
     parent: tempest-full-py3
     override-checkout: stable/wallaby
@@ -16,12 +21,6 @@
     override-checkout: stable/ussuri
 
 - job:
-    name: tempest-full-train-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/train
-
-- job:
     name: tempest-full-py3
     parent: devstack-tempest
     # This job version is with swift disabled on py3
@@ -179,3 +178,24 @@
       subnode:
         devstack_localrc:
           USE_PYTHON3: true
+
+- job:
+    name: tempest-full-py3-opensuse15
+    parent: tempest-full-py3
+    nodeset: devstack-single-node-opensuse-15
+    description: |
+      Base integration test with Neutron networking and py36 running
+      on openSUSE Leap 15.x
+    voting: false
+    # This job is not used after stable/xena and can be
+    # removed once stable/xena is EOL.
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+      - stable/stein
+      - stable/train
+      - stable/ussuri
+      - stable/victoria
+      - stable/wallaby
+      - stable/xena
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 5063d89..7d28e5c 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -69,13 +69,24 @@
         c-bak: false
 
 - job:
-    name: tempest-full-py3-opensuse15
+    name: tempest-full-py3-centos-8-stream
     parent: tempest-full-py3
-    nodeset: devstack-single-node-opensuse-15
+    # TODO(gmann): Make this job non-voting until bug#1957941 is fixed.
+    voting: false
+    nodeset: devstack-single-node-centos-8-stream
     description: |
       Base integration test with Neutron networking and py36 running
-      on openSUSE Leap 15.x
+      on CentOS 8 stream
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
+
+- job:
+    name: tempest-full-centos-9-stream
+    parent: tempest-full-py3-centos-8-stream
     voting: false
+    nodeset: devstack-single-node-centos-9-stream
 
 - job:
     name: tempest-tox-plugin-sanity-check