Merge "Update volume schema for microversion"
diff --git a/HACKING.rst b/HACKING.rst
index 95bcbb5..dc28e4e 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -26,6 +26,7 @@
 - [T116] Unsupported 'message' Exception attribute in PY3
 - [T117] Check negative tests have ``@decorators.attr(type=['negative'])``
   applied.
+- [T118] LOG.warn is deprecated. Enforce use of LOG.warning.
 
 It is recommended to use ``tox -eautopep8`` before submitting a patch.
 
diff --git a/bindep.txt b/bindep.txt
index efd3a10..7d34939 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -5,7 +5,6 @@
 libffi-devel [platform:rpm]
 gcc [platform:rpm]
 gcc [platform:dpkg]
-python-dev [platform:dpkg]
 python-devel [platform:rpm]
 python3-dev [platform:dpkg]
 python3-devel [platform:rpm]
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index ecf2930..20ace9e 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -374,6 +374,10 @@
 
   .. _2.42: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-ocata
 
+  * `2.45`_
+
+  .. _2.45: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id41
+
   * `2.47`_
 
   .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
@@ -386,6 +390,10 @@
 
   .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id45
 
+  * `2.50`_
+
+  .. _2.50: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id46
+
   * `2.53`_
 
   .. _2.53: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-pike
@@ -418,6 +426,10 @@
 
   .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id58
 
+  * `2.64`_
+
+  .. _2.64: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id59
+
   * `2.70`_
 
   .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
@@ -430,6 +442,10 @@
 
   .. _2.73: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id66
 
+  * `2.75`_
+
+  .. _2.75: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id68
+
   * `2.79`_
 
   .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 4ca7f0d..f630578 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,9 +9,10 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* Yoga
+* Xena
+* Wallaby
 * Victoria
-* Ussuri
-* Train
 
 For older OpenStack Release:
 
@@ -32,6 +33,5 @@
 
 Tempest master supports the below python versions:
 
-* Python 3.6
-* Python 3.7
 * Python 3.8
+* Python 3.9
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index a3bb645..bb223b1 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -50,19 +50,10 @@
 a Depends-On in the commit message for the commit which moved the test into
 another repo.
 
-For prong 2 you can use OpenStack-Health:
+For prong 2 you can use subunit2sql:
 
-Using OpenStack-Health
-""""""""""""""""""""""
-
-Go to: http://status.openstack.org/openstack-health and then navigate to a per
-test page for six months. You'll end up with a page that will graph the success
-and failure rates on the bottom graph. For example, something like `this URL`_.
-
-.. _this URL: http://status.openstack.org/openstack-health/#/test/tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern?groupKey=project&resolutionKey=day&duration=P6M
-
-The Old Way using subunit2sql directly
-""""""""""""""""""""""""""""""""""""""
+Using subunit2sql directly
+""""""""""""""""""""""""""
 
 ``SELECT * from tests where test_id like "%test_id%";``
 (where ``$test_id`` is the full test_id, but truncated to the class because of
diff --git a/releasenotes/notes/Switch-to-ecdsa-ssh-key-type-by-default-0425b5d5ec72c1c3.yaml b/releasenotes/notes/Switch-to-ecdsa-ssh-key-type-by-default-0425b5d5ec72c1c3.yaml
new file mode 100644
index 0000000..5fc316b
--- /dev/null
+++ b/releasenotes/notes/Switch-to-ecdsa-ssh-key-type-by-default-0425b5d5ec72c1c3.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+  - |
+    The version of cirros used in OpenStack CI does not support SHA-2
+    signatures for ssh, so any connection from a FIPS-enabled machine
+    fails when validation.ssh_key_type is set to rsa (the default until
+    now). Using ecdsa keys avoids this issue.
+    From now on, the validation.ssh_key_type option defaults to ecdsa
+    for testing simplicity.
+    This change should not have any drastic effect on Tempest consumers;
+    if rsa keys are required in a consumer's scenario,
+    validation.ssh_key_type can be overridden to rsa.
diff --git a/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml b/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml
new file mode 100644
index 0000000..ec4e2f2
--- /dev/null
+++ b/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    Python 3.6 and 3.7 support has been dropped. The last release of
+    Tempest to support Python 3.6 and 3.7 is Tempest 30.0.0. The minimum
+    version of Python now supported by Tempest is Python 3.8.
diff --git a/releasenotes/notes/end-of-support-for-ussuri-68583f47805eff02.yaml b/releasenotes/notes/end-of-support-for-ussuri-68583f47805eff02.yaml
new file mode 100644
index 0000000..1a750d9
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-ussuri-68583f47805eff02.yaml
@@ -0,0 +1,13 @@
+---
+prelude: |
+    This is an intermediate release during the Zed development cycle to
+    mark the end of support for the EM Ussuri release in Tempest.
+    After this release, Tempest will support the below OpenStack Releases:
+
+    * Yoga
+    * Xena
+    * Wallaby
+    * Victoria
+
+    Current development of Tempest is for the OpenStack Zed development
+    cycle.
diff --git a/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml b/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml
new file mode 100644
index 0000000..c644e3a
--- /dev/null
+++ b/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Zed development cycle to
+    mark the end of support for the EM Victoria release in Tempest.
+    After this release, Tempest will support the below OpenStack Releases:
+
+    * Yoga
+    * Xena
+    * Wallaby
+
+    Current development of Tempest is for the OpenStack Zed development
+    cycle.
diff --git a/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml b/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml
new file mode 100644
index 0000000..9f4abd1
--- /dev/null
+++ b/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml
@@ -0,0 +1,9 @@
+---
+features:
+  - |
+    Added a new module, net_downtime, including the fixture NetDowntimeMeter,
+    which can be used to measure how long connectivity with an IP is lost
+    during operations such as a server live migration.
+    The configuration option allowed_network_downtime has been added with a
+    default value of 5.0 seconds, which is the maximum time that
+    the connectivity downtime is expected to last.
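
The note above only names the new NetDowntimeMeter fixture and the allowed_network_downtime option; the fixture's API is not part of this diff. As a rough, standalone illustration of what the fixture measures (not the Tempest API; measure_downtime, stop and interval are hypothetical names), the idea is to probe the IP while the operation runs and add up the time it does not respond:

    import subprocess
    import time


    def measure_downtime(ip, stop, interval=0.2):
        """Accumulate the seconds during which ``ip`` did not answer a ping."""
        downtime = 0.0
        last = time.monotonic()
        while not stop():
            # One ping with a one-second timeout; a non-zero return code
            # means no reply arrived (Linux ping flags assumed).
            alive = subprocess.call(
                ['ping', '-c', '1', '-W', '1', ip],
                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
            now = time.monotonic()
            if not alive:
                # Attribute the whole gap since the previous probe to downtime.
                downtime += now - last
            last = now
            time.sleep(interval)
        return downtime

A test would run the operation (for example a live migration) while such a meter is active and assert that the measured downtime stays below the configured allowed_network_downtime, 5.0 seconds by default.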
diff --git a/releasenotes/notes/remove-compute-api-extensions-config-b8564f60f4fa5495.yaml b/releasenotes/notes/remove-compute-api-extensions-config-b8564f60f4fa5495.yaml
new file mode 100644
index 0000000..55df775
--- /dev/null
+++ b/releasenotes/notes/remove-compute-api-extensions-config-b8564f60f4fa5495.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    Remove the deprecated config option ``api_extensions`` from the
+    ``compute_feature_enabled`` group.
diff --git a/releasenotes/notes/tempest-yoga-release-66e8484b9a402e9f.yaml b/releasenotes/notes/tempest-yoga-release-66e8484b9a402e9f.yaml
new file mode 100644
index 0000000..e41e31d
--- /dev/null
+++ b/releasenotes/notes/tempest-yoga-release-66e8484b9a402e9f.yaml
@@ -0,0 +1,18 @@
+---
+prelude: |
+    This release is to tag Tempest for the OpenStack Yoga release.
+    This release marks the start of Yoga release support in Tempest.
+    After this release, Tempest will support the below OpenStack Releases:
+
+    * Yoga
+    * Xena
+    * Wallaby
+    * Victoria
+    * Ussuri
+
+    Current development of Tempest is for the OpenStack Zed development
+    cycle. Every Tempest commit is also tested against master during
+    the Zed cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Zed (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Yoga release.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 6a1f8b4..e1e6597 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,10 @@
    :maxdepth: 1
 
    unreleased
+   v31.0.0
+   v30.0.0
+   v29.2.0
+   v29.1.0
    v29.0.0
    v28.1.0
    v28.0.0
diff --git a/releasenotes/source/v29.1.0.rst b/releasenotes/source/v29.1.0.rst
new file mode 100644
index 0000000..f8780fd
--- /dev/null
+++ b/releasenotes/source/v29.1.0.rst
@@ -0,0 +1,5 @@
+=====================
+v29.1.0 Release Notes
+=====================
+.. release-notes:: 29.1.0 Release Notes
+   :version: 29.1.0
diff --git a/releasenotes/source/v29.2.0.rst b/releasenotes/source/v29.2.0.rst
new file mode 100644
index 0000000..4f2f2b2
--- /dev/null
+++ b/releasenotes/source/v29.2.0.rst
@@ -0,0 +1,5 @@
+=====================
+v29.2.0 Release Notes
+=====================
+.. release-notes:: 29.2.0 Release Notes
+   :version: 29.2.0
diff --git a/releasenotes/source/v30.0.0.rst b/releasenotes/source/v30.0.0.rst
new file mode 100644
index 0000000..048b8ab
--- /dev/null
+++ b/releasenotes/source/v30.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v30.0.0 Release Notes
+=====================
+.. release-notes:: 30.0.0 Release Notes
+   :version: 30.0.0
diff --git a/releasenotes/source/v31.0.0.rst b/releasenotes/source/v31.0.0.rst
new file mode 100644
index 0000000..8fb797c
--- /dev/null
+++ b/releasenotes/source/v31.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v31.0.0 Release Notes
+=====================
+.. release-notes:: 31.0.0 Release Notes
+   :version: 31.0.0
diff --git a/requirements.txt b/requirements.txt
index bc8358b..c4c7fcc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,4 +21,3 @@
 PrettyTable>=0.7.1 # BSD
 urllib3>=1.21.1 # MIT
 debtcollector>=1.2.0 # Apache-2.0
-unittest2>=1.1.0 # BSD
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 0c72b69..d9f855a 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -81,7 +81,7 @@
 .. zuul:rolevar:: stable_constraints_file
    :default: ''
 
-   Upper constraints file to be used for stable branch till stable/stein.
+   Upper constraints file to be used for stable branch till stable/victoria.
 
 .. zuul:rolevar:: tempest_tox_environment
    :default: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index a8b3ede..f302fa5 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,11 +25,11 @@
     target_branch: "{{ zuul.override_checkout }}"
   when: zuul.override_checkout is defined
 
-- name: Use stable branch upper-constraints till stable/stein
+- name: Use stable branch upper-constraints till stable/victoria
   set_fact:
     # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
-  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "stable/victoria"]
 
 - name: Use Configured upper-constraints for non-master Tempest
   set_fact:
@@ -78,6 +78,17 @@
         exclude_list_option: "--exclude-list={{ tempest_test_exclude_list|quote }}"
       when: exclude_list_stat.stat.exists
 
+- name: Tempest 26.1.0 workaround to fall back from exclude-list to blacklist
+  # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 with
+  # stestr 2.5.1/3.0.1 (because of the stestr 2.5.1/3.0.1 upper constraints
+  # in stable/train|ussuri|victoria), which does not have the new
+  # exclude-list arg, so fall back to the old arg if the new one is passed.
+  set_fact:
+    exclude_list_option: "--blacklist-file={{ tempest_test_exclude_list|quote }}"
+  when:
+    - tempest_test_exclude_list is defined
+    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
+
 # TODO(kopecmartin) remove this after all consumers of the role have switched
 # to tempest_exclude_regex option, until then it's kept here for the backward
 # compatibility
@@ -94,6 +105,19 @@
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
+    - target_branch not in ["stable/train", "stable/ussuri", "stable/victoria"]
+
+- name: Tempest 26.1.0 workaround to fall back from exclude-regex to black-regex
+  # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 with stestr
+  # 2.5.1/3.0.1 (because of the stestr 2.5.1/3.0.1 upper constraints in
+  # stable/train|ussuri|victoria), which does not have the new exclude-regex
+  # arg, so fall back to the old arg if the new one is passed.
+  set_fact:
+    tempest_test_exclude_regex: "--black-regex={{tempest_exclude_regex|quote}}"
+  when:
+    - tempest_black_regex is not defined
+    - tempest_exclude_regex is defined
+    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
 
 - name: Run Tempest
   command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} \
diff --git a/setup.cfg b/setup.cfg
index a41eccf..a531eb4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,7 +6,7 @@
 author = OpenStack
 author_email = openstack-discuss@lists.openstack.org
 home_page = https://docs.openstack.org/tempest/latest/
-python_requires = >=3.6
+python_requires = >=3.8
 classifier =
     Intended Audience :: Information Technology
     Intended Audience :: System Administrators
@@ -15,8 +15,6 @@
     Operating System :: POSIX :: Linux
     Programming Language :: Python
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.6
-    Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
     Programming Language :: Python :: 3 :: Only
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index d6b6b7e..294b1ab 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -16,7 +16,6 @@
 import uuid
 
 from tempest.api.compute import base
-from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
@@ -26,13 +25,6 @@
     """Tests Flavors API Create and Delete that require admin privileges"""
 
     @classmethod
-    def skip_checks(cls):
-        super(FlavorsAdminTestJSON, cls).skip_checks()
-        if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
-            msg = "OS-FLV-EXT-DATA extension not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
     def resource_setup(cls):
         super(FlavorsAdminTestJSON, cls).resource_setup()
 
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 87ab7c7..c86ff76 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -14,7 +14,6 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest.common import utils
 from tempest.lib import decorators
 
 
@@ -25,13 +24,6 @@
     """
 
     @classmethod
-    def skip_checks(cls):
-        super(FlavorsAccessTestJSON, cls).skip_checks()
-        if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
-            msg = "OS-FLV-EXT-DATA extension not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
     def resource_setup(cls):
         super(FlavorsAccessTestJSON, cls).resource_setup()
 
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index ac09cb0..3b38693 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -14,7 +14,6 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest.common import utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
@@ -28,13 +27,6 @@
     credentials = ['primary', 'admin', 'alt']
 
     @classmethod
-    def skip_checks(cls):
-        super(FlavorsAccessNegativeTestJSON, cls).skip_checks()
-        if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
-            msg = "OS-FLV-EXT-DATA extension not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
     def resource_setup(cls):
         super(FlavorsAccessNegativeTestJSON, cls).resource_setup()
 
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 10018fe..da95660 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -14,7 +14,6 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
@@ -27,13 +26,6 @@
     """
 
     @classmethod
-    def skip_checks(cls):
-        super(FlavorsExtraSpecsTestJSON, cls).skip_checks()
-        if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
-            msg = "OS-FLV-EXT-DATA extension not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
     def resource_setup(cls):
         super(FlavorsExtraSpecsTestJSON, cls).resource_setup()
         flavor_name = data_utils.rand_name('test_flavor')
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index 721acca..6822614 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -15,7 +15,6 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
@@ -28,13 +27,6 @@
     """
 
     @classmethod
-    def skip_checks(cls):
-        super(FlavorsExtraSpecsNegativeTestJSON, cls).skip_checks()
-        if not utils.is_extension_enabled('OS-FLV-EXT-DATA', 'compute'):
-            msg = "OS-FLV-EXT-DATA extension not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
     def resource_setup(cls):
         super(FlavorsExtraSpecsNegativeTestJSON, cls).resource_setup()
 
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index c91b557..2826f56 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -34,11 +34,6 @@
 class LiveMigrationTestBase(base.BaseV2ComputeAdminTest):
     """Test live migration operations supported by admin user"""
 
-    # These tests don't attempt any SSH validation nor do they use
-    # floating IPs on the instance, so all we need is a network and
-    # a subnet so the instance being migrated has a single port, but
-    # we need that to make sure we are properly updating the port
-    # host bindings during the live migration.
     create_default_network = True
 
     @classmethod
@@ -104,6 +99,11 @@
     max_microversion = '2.24'
     block_migration = None
 
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(LiveMigrationTest, cls).setup_credentials()
+
     def _test_live_migration(self, state='ACTIVE', volume_backed=False):
         """Tests live migration between two hosts.
 
@@ -182,7 +182,12 @@
         attach volume. This differs from test_volume_backed_live_migration
         above that tests live-migration with only an attached volume.
         """
-        server = self.create_test_server(wait_until="ACTIVE")
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until="SSHABLE")
         server_id = server['id']
         if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
             # not to specify a host so that the scheduler will pick one
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 9d5e0c9..caf4fc1 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -14,14 +14,17 @@
 #    under the License.
 
 from oslo_log import log as logging
+import testtools
 from testtools import matchers
 
 from tempest.api.compute import base
 from tempest.common import identity
 from tempest.common import tempest_fixtures as fixtures
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
+CONF = config.CONF
 LOG = logging.getLogger(__name__)
 
 
@@ -110,6 +113,8 @@
             self.assertIn(quota, quota_set.keys())
 
     @decorators.idempotent_id('55fbe2bf-21a9-435b-bbd2-4162b0ed799a')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_update_all_quota_resources_for_tenant(self):
         """Test admin can update all the compute quota limits for a project"""
         default_quota_set = self.adm_client.show_default_quota_set(
@@ -141,11 +146,15 @@
 
     # TODO(afazekas): merge these test cases
     @decorators.idempotent_id('ce9e0815-8091-4abd-8345-7fe5b85faa1d')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_get_updated_quotas(self):
         """Test that GET shows the updated quota set of project"""
         self._get_updated_quotas()
 
     @decorators.idempotent_id('389d04f0-3a41-405f-9317-e5f86e3c44f0')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_delete_quota(self):
         """Test admin can delete the compute quota set for a project"""
         project_name = data_utils.rand_name('ram_quota_project')
@@ -178,6 +187,8 @@
     min_microversion = '2.36'
 
     @decorators.idempotent_id('4268b5c9-92e5-4adc-acf1-3a2798f3d803')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_get_updated_quotas(self):
         """Test compute quotas API with microversion greater than 2.35
 
@@ -197,6 +208,8 @@
     min_microversion = '2.57'
 
     @decorators.idempotent_id('e641e6c6-e86c-41a4-9e5c-9493c0ae47ad')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_get_updated_quotas(self):
         """Test compute quotas API with microversion greater than 2.56
 
@@ -228,6 +241,8 @@
     # tests that get run all by themselves at the end under a
     # 'danger' flag.
     @decorators.idempotent_id('7932ab0f-5136-4075-b201-c0e2338df51a')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_update_default_quotas(self):
         """Test updating default compute quota class set"""
         # get the current 'default' quota class values
@@ -253,3 +268,14 @@
             'default')['quota_class_set']
         self.assertThat(show_body.items(),
                         matchers.ContainsAll(body.items()))
+
+
+class QuotaClassesAdmin257Test(QuotaClassesAdminTestJSON):
+    """Test compute quotas with microversion greater than 2.56
+
+    # NOTE(gmann): This test verifies the Quota class APIs response schema
+    # for the 2.57 microversion. No specific assert or behaviour verification
+    # is needed.
+    """
+
+    min_microversion = '2.57'
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 04dbc2d..a4120bb 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -12,6 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.api.compute import base
 from tempest.common import utils
 from tempest import config
@@ -68,6 +70,8 @@
     # It can be moved into the setUpClass as well.
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('91058876-9947-4807-9f22-f6eb17140d9b')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_create_server_when_cpu_quota_is_full(self):
         """Disallow server creation when tenant's vcpu quota is full"""
         self._update_quota('cores', 0)
@@ -76,6 +80,8 @@
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6fdd7012-584d-4327-a61c-49122e0d5864')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_create_server_when_memory_quota_is_full(self):
         """Disallow server creation when tenant's memory quota is full"""
         self._update_quota('ram', 0)
@@ -84,6 +90,8 @@
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7c6be468-0274-449a-81c3-ac1c32ee0161')
+    @testtools.skipIf(CONF.compute_feature_enabled.unified_limits,
+                      'Legacy quota update not available with unified limits')
     def test_create_server_when_instances_quota_is_full(self):
         """Once instances quota limit is reached, disallow server creation"""
         self._update_quota('instances', 0)
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index ab1b49a..bc00f8c 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -223,3 +223,32 @@
         }
         self.create_test_server(scheduler_hints=hints,
                                 wait_until='ACTIVE')
+
+
+class ServersAdmin275Test(base.BaseV2ComputeAdminTest):
+    """Test compute server with microversion 2.75 or greater
+
+    # NOTE(gmann): This test verifies the Server APIs response schema
+    # for the 2.75 microversion. No specific assert or behaviour verification
+    # is needed.
+    """
+
+    min_microversion = '2.75'
+
+    @decorators.idempotent_id('bf2b4a00-73a3-4d53-81fa-acbcd97d6339')
+    def test_rebuild_update_server_275(self):
+        server = self.create_test_server()
+        # Checking update response schema.
+        self.servers_client.update_server(server['id'])
+        waiters.wait_for_server_status(self.servers_client, server['id'],
+                                       'ACTIVE')
+        # Checking rebuild API response schema
+        self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
+        # Checking rebuild server with admin response schema.
+        self.os_admin.servers_client.rebuild_server(
+            server['id'], self.image_ref)
+        self.addCleanup(waiters.wait_for_server_status,
+                        self.os_admin.servers_client,
+                        server['id'], 'ACTIVE')
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 549d4fb..99d8e2a 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -85,10 +85,14 @@
             hw_scsi_model='virtio-scsi',
             hw_disk_bus='scsi',
             hw_cdrom_bus='scsi')
-        server = self.create_test_server(image_id=custom_img,
-                                         config_drive=True,
-                                         wait_until='ACTIVE')
-
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            image_id=custom_img,
+            config_drive=True,
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until="SSHABLE")
         # NOTE(lyarwood): self.create_test_server delete the server
         # at class level cleanup so add server cleanup to ensure that
         # the instance is deleted first before created image. This
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index c1236a7..7da87c7 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -26,6 +26,11 @@
     create_default_network = True
 
     @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(TestVolumeSwapBase, cls).setup_credentials()
+
+    @classmethod
     def skip_checks(cls):
         super(TestVolumeSwapBase, cls).skip_checks()
         if not CONF.compute_feature_enabled.swap_volume:
@@ -100,7 +105,16 @@
         volume1 = self.create_volume()
         volume2 = self.create_volume()
         # Boot server
-        server = self.create_test_server(wait_until='ACTIVE')
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guest to fully boot as the test
+        # will attach a volume to the server and therefore cleanup will try to
+        # detach it. See bug 1960346 for details.
+        server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
         # Attach "volume1" to server
         self.attach_volume(server, volume1)
         # Swap volume from "volume1" to "volume2"
@@ -200,9 +214,18 @@
         volume2 = self.create_volume(multiattach=True)
 
         # Create two servers and wait for them to be ACTIVE.
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guests to fully boot as the test
+        # will attach volumes to the servers and therefore cleanup will try to
+        # detach them. See bug 1960346 for details.
         reservation_id = self.create_test_server(
-            wait_until='ACTIVE', min_count=2,
-            return_reservation_id=True)['reservation_id']
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE',
+            min_count=2,
+            return_reservation_id=True,
+        )['reservation_id']
         # Get the servers using the reservation_id.
         servers = self.servers_client.list_servers(
             reservation_id=reservation_id)['servers']
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 10d522b..91ab09e 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -28,21 +28,22 @@
     create_default_network = True
 
     @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(VolumesAdminNegativeTest, cls).setup_credentials()
+
+    @classmethod
     def skip_checks(cls):
         super(VolumesAdminNegativeTest, cls).skip_checks()
         if not CONF.service_available.cinder:
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
 
-    @classmethod
-    def resource_setup(cls):
-        super(VolumesAdminNegativeTest, cls).resource_setup()
-        cls.server = cls.create_test_server(wait_until='ACTIVE')
-
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
     def test_update_attached_volume_with_nonexistent_volume_in_uri(self):
         """Test swapping non existent volume should fail"""
+        self.server = self.create_test_server(wait_until="ACTIVE")
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
@@ -55,6 +56,17 @@
     @decorators.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
     def test_update_attached_volume_with_nonexistent_volume_in_body(self):
         """Test swapping volume to a non existence volume should fail"""
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guest to fully boot as
+        # test_update_attached_volume_with_nonexistent_volume_in_body case
+        # will attach a volume to it and therefore cleanup will try to detach
+        # it. See bug 1960346 for details.
+        self.server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until="SSHABLE")
+
         volume = self.create_volume()
         self.attach_volume(self.server, volume)
 
@@ -76,6 +88,13 @@
     min_microversion = '2.60'
     volume_min_microversion = '3.27'
 
+    create_default_network = True
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(UpdateMultiattachVolumeNegativeTest, cls).setup_credentials()
+
     @classmethod
     def skip_checks(cls):
         super(UpdateMultiattachVolumeNegativeTest, cls).skip_checks()
@@ -101,8 +120,21 @@
         vol2 = self.create_volume(multiattach=True)
 
         # Create two instances.
-        server1 = self.create_test_server(wait_until='ACTIVE')
-        server2 = self.create_test_server(wait_until='ACTIVE')
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guests to fully boot as the test
+        # will attach volumes to the servers and therefore cleanup will try to
+        # detach them. See bug 1960346 for details.
+        server1 = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
+        server2 = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
 
         # Attach vol1 to both of these instances.
         vol1_attachment1 = self.attach_volume(server1, vol1)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index ed50282..75df5ae 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -66,7 +66,9 @@
         # Setting network=True, subnet=True creates a default network
         cls.set_network_resources(
             network=cls.create_default_network,
-            subnet=cls.create_default_network)
+            subnet=cls.create_default_network,
+            router=cls.create_default_network,
+            dhcp=cls.create_default_network)
         super(BaseV2ComputeTest, cls).setup_credentials()
 
     @classmethod
@@ -306,10 +308,18 @@
     def create_test_server_group(cls, name="", policy=None):
         if not name:
             name = data_utils.rand_name(cls.__name__ + "-Server-Group")
-        if policy is None:
-            policy = ['affinity']
+        if cls.is_requested_microversion_compatible('2.63'):
+            policy = policy or ['affinity']
+            if not isinstance(policy, list):
+                policy = [policy]
+            kwargs = {'policies': policy}
+        else:
+            policy = policy or 'affinity'
+            if isinstance(policy, list):
+                policy = policy[0]
+            kwargs = {'policy': policy}
         body = cls.server_groups_client.create_server_group(
-            name=name, policies=policy)['server_group']
+            name=name, **kwargs)['server_group']
         cls.addClassResourceCleanup(
             test_utils.call_and_ignore_notfound_exc,
             cls.server_groups_client.delete_server_group,
@@ -404,7 +414,8 @@
         return image
 
     @classmethod
-    def recreate_server(cls, server_id, validatable=False, **kwargs):
+    def recreate_server(cls, server_id, validatable=False, wait_until='ACTIVE',
+                        **kwargs):
         """Destroy an existing class level server and creates a new one
 
         Some test classes use a test server that can be used by multiple
@@ -432,7 +443,7 @@
             validatable,
             validation_resources=cls.get_class_validation_resources(
                 cls.os_primary),
-            wait_until='ACTIVE',
+            wait_until=wait_until,
             adminPass=cls.password,
             **kwargs)
         return server['id']
@@ -447,15 +458,31 @@
         except Exception:
             LOG.exception('Failed to delete server %s', server_id)
 
-    def resize_server(self, server_id, new_flavor_id, **kwargs):
+    def resize_server(
+        self, server_id, new_flavor_id, wait_until='ACTIVE', **kwargs
+    ):
         """resize and confirm_resize an server, waits for it to be ACTIVE."""
         self.servers_client.resize_server(server_id, new_flavor_id, **kwargs)
         waiters.wait_for_server_status(self.servers_client, server_id,
                                        'VERIFY_RESIZE')
         self.servers_client.confirm_resize_server(server_id)
+
         waiters.wait_for_server_status(
             self.servers_client, server_id, 'ACTIVE')
         server = self.servers_client.show_server(server_id)['server']
+
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        if (
+            validation_resources and
+            wait_until in ("SSHABLE", "PINGABLE") and
+            CONF.validation.run_validation
+        ):
+            tenant_network = self.get_tenant_network()
+            compute.wait_for_ssh_or_ping(
+                server, self.os_primary, tenant_network,
+                True, validation_resources, wait_until, True)
+
         self.assert_flavor_equal(new_flavor_id, server['flavor'])
 
     def reboot_server(self, server_id, type):
@@ -487,21 +514,8 @@
         :param validation_resources: The dict of validation resources
             provisioned for the server.
         """
-        if CONF.validation.connect_method == 'floating':
-            if validation_resources:
-                return validation_resources['floating_ip']['ip']
-            else:
-                msg = ('When validation.connect_method equals floating, '
-                       'validation_resources cannot be None')
-                raise lib_exc.InvalidParam(invalid_param=msg)
-        elif CONF.validation.connect_method == 'fixed':
-            addresses = server['addresses'][CONF.validation.network_for_ssh]
-            for address in addresses:
-                if address['version'] == CONF.validation.ip_version_for_ssh:
-                    return address['addr']
-            raise exceptions.ServerUnreachable(server_id=server['id'])
-        else:
-            raise lib_exc.InvalidConfiguration()
+        return compute.get_server_ip(
+            server, validation_resources=validation_resources)
 
     @classmethod
     def create_volume(cls, image_ref=None, **kwargs):
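
Aside on the create_test_server_group change above: the helper now accepts either a single policy string or a list and converts it to whichever request field the negotiated microversion expects, 'policies' (a list) up to 2.63 and 'policy' (a single string) from 2.64 on. A minimal standalone sketch of that normalization, with the microversion check reduced to a boolean argument (server_group_kwargs and pre_264 are illustrative names, not Tempest symbols):

    def server_group_kwargs(policy, pre_264):
        """Build the policy field for a server-group create request."""
        if pre_264:
            # Up to microversion 2.63 the API expects 'policies' as a list.
            policy = policy or ['affinity']
            if not isinstance(policy, list):
                policy = [policy]
            return {'policies': policy}
        # From microversion 2.64 on the API expects a single 'policy' string.
        policy = policy or 'affinity'
        if isinstance(policy, list):
            policy = policy[0]
        return {'policy': policy}


    assert server_group_kwargs('anti-affinity', pre_264=True) == {
        'policies': ['anti-affinity']}
    assert server_group_kwargs(['affinity'], pre_264=False) == {
        'policy': 'affinity'}

The same rule is why the server-group tests later in this diff derive the field name from is_requested_microversion_compatible('2.63') instead of hard-coding the 'policies' list.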
diff --git a/tempest/api/compute/images/test_images_negative.py b/tempest/api/compute/images/test_images_negative.py
index 5ff2a6a..124651e 100644
--- a/tempest/api/compute/images/test_images_negative.py
+++ b/tempest/api/compute/images/test_images_negative.py
@@ -43,6 +43,7 @@
 
 class ImagesNegativeTestJSON(ImagesNegativeTestBase):
     """Negative tests of server image"""
+    create_default_network = True
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6cd5a89d-5b47-46a7-93bc-3916f0d84973')
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 56456f4..7d29a4d 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -212,7 +212,7 @@
 
         server = self.create_test_server(
             validatable=True,
-            wait_until='ACTIVE',
+            wait_until='SSHABLE',
             validation_resources=validation_resources,
             config_drive=config_drive_enabled,
             name=data_utils.rand_name('device-tagging-server'),
@@ -335,7 +335,9 @@
     def verify_device_metadata(self, md_json):
         try:
             md_dict = json.loads(md_json)
-        except (json_decoder.JSONDecodeError, TypeError):
+        except (json_decoder.JSONDecodeError, TypeError) as e:
+            LOG.warning(
+                'Failed to decode json metadata: %s, %s', str(e), str(md_json))
             return False
 
         found_devices = [d['tags'][0] for d in md_dict['devices']
@@ -345,7 +347,9 @@
                 sorted(found_devices),
                 sorted(['nic-tag', 'volume-tag']))
             return True
-        except Exception:
+        except Exception as e:
+            LOG.warning(
+                'Failed to parse metadata: %s, %s', str(e), str(md_json))
             return False
 
     def verify_empty_devices(self, md_json):
@@ -398,7 +402,7 @@
             config_drive=config_drive_enabled,
             name=data_utils.rand_name('device-tagging-server'),
             networks=[{'uuid': self.get_tenant_network()['id']}],
-            wait_until='ACTIVE')
+            wait_until='SSHABLE')
         self.addCleanup(self.delete_server, server['id'])
 
         # NOTE(mgoddard): Get detailed server to ensure addresses are present
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 152e7e8..0ed73a8 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -43,6 +43,17 @@
         super(ServerActionsTestJSON, self).setUp()
         # Check if the server is in a clean state after test
         try:
+            validation_resources = self.get_class_validation_resources(
+                self.os_primary)
+            # The _test_rebuild_server test compares the ip address attached
+            # to the server before and after the rebuild. To avoid a situation
+            # where a newly created server doesn't have a floating ip attached
+            # at the beginning of test_rebuild_server, make sure right here
+            # that the floating ip is attached.
+            waiters.wait_for_server_floating_ip(
+                self.client,
+                self.client.show_server(self.server_id)['server'],
+                validation_resources['floating_ip'])
             waiters.wait_for_server_status(self.client,
                                            self.server_id, 'ACTIVE')
         except lib_exc.NotFound:
@@ -54,12 +65,12 @@
             server = self.create_test_server(
                 validatable=True,
                 validation_resources=validation_resources,
-                wait_until='ACTIVE')
+                wait_until='SSHABLE')
             self.__class__.server_id = server['id']
         except Exception:
             # Rebuild server if something happened to it during a test
             self.__class__.server_id = self.recreate_server(
-                self.server_id, validatable=True)
+                self.server_id, validatable=True, wait_until='SSHABLE')
 
     def tearDown(self):
         super(ServerActionsTestJSON, self).tearDown()
@@ -81,7 +92,8 @@
     @classmethod
     def resource_setup(cls):
         super(ServerActionsTestJSON, cls).resource_setup()
-        cls.server_id = cls.recreate_server(None, validatable=True)
+        cls.server_id = cls.recreate_server(None, validatable=True,
+                                            wait_until='SSHABLE')
 
     @decorators.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
     @testtools.skipUnless(CONF.compute_feature_enabled.change_password,
@@ -781,3 +793,28 @@
         self.assertEqual('novnc', body['type'])
         self.assertNotEqual('', body['url'])
         self._validate_url(body['url'])
+
+
+class ServersAaction247Test(base.BaseV2ComputeTest):
+    """Test compute server with microversion 2.47 or greater
+
+    # NOTE(gmann): This test verifies the Server create backup API
+    # response schema for the 2.47 microversion. No specific assert
+    # or behaviour verification is needed.
+    """
+
+    min_microversion = '2.47'
+
+    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
+                          'Snapshotting not available, backup not possible.')
+    @utils.services('image')
+    @decorators.idempotent_id('252a4bdd-6366-4dae-9994-8c30aa660f23')
+    def test_create_backup(self):
+        server = self.create_test_server(wait_until='ACTIVE')
+
+        backup1 = data_utils.rand_name('backup-1')
+        # Just call create_backup to verify the schema with 2.47
+        self.servers_client.create_backup(server['id'],
+                                          backup_type='daily',
+                                          rotation=2,
+                                          name=backup1)
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 4c0d021..4b6d45a 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -17,7 +17,6 @@
 
 from tempest.api.compute import base
 from tempest.common import compute
-from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
@@ -32,21 +31,26 @@
     create_default_network = True
 
     @classmethod
-    def skip_checks(cls):
-        super(ServerGroupTestJSON, cls).skip_checks()
-        if not utils.is_extension_enabled('os-server-groups', 'compute'):
-            msg = "os-server-groups extension is not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
     def setup_clients(cls):
         super(ServerGroupTestJSON, cls).setup_clients()
         cls.client = cls.server_groups_client
 
     @classmethod
+    def _set_policy(cls, policy):
+        if not cls.is_requested_microversion_compatible('2.63'):
+            return policy[0]
+        else:
+            return policy
+
+    @classmethod
     def resource_setup(cls):
         super(ServerGroupTestJSON, cls).resource_setup()
-        cls.policy = ['affinity']
+        if cls.is_requested_microversion_compatible('2.63'):
+            cls.policy_field = 'policies'
+            cls.policy = ['affinity']
+        else:
+            cls.policy_field = 'policy'
+            cls.policy = 'affinity'
 
     def setUp(self):
         super(ServerGroupTestJSON, self).setUp()
@@ -61,9 +65,9 @@
 
     def _create_server_group(self, name, policy):
         # create the test server-group with given policy
-        server_group = {'name': name, 'policies': policy}
+        server_group = {'name': name, self.policy_field: policy}
         body = self.create_test_server_group(name, policy)
-        for key in ['name', 'policies']:
+        for key in ['name', self.policy_field]:
             self.assertEqual(server_group[key], body[key])
         return body
 
@@ -88,7 +92,7 @@
     @decorators.idempotent_id('3645a102-372f-4140-afad-13698d850d23')
     def test_create_delete_server_group_with_anti_affinity_policy(self):
         """Test Create/Delete the server-group with anti-affinity policy"""
-        policy = ['anti-affinity']
+        policy = self._set_policy(['anti-affinity'])
         self._create_delete_server_group(policy)
 
     @decorators.idempotent_id('154dc5a4-a2fe-44b5-b99e-f15806a4a113')
@@ -99,7 +103,7 @@
         for _ in range(0, 2):
             server_groups.append(self._create_server_group(server_group_name,
                                                            self.policy))
-        for key in ['name', 'policies']:
+        for key in ['name', self.policy_field]:
             self.assertEqual(server_groups[0][key], server_groups[1][key])
         self.assertNotEqual(server_groups[0]['id'], server_groups[1]['id'])
 
@@ -134,3 +138,24 @@
         server_group = (self.server_groups_client.show_server_group(
             self.created_server_group['id'])['server_group'])
         self.assertIn(server['id'], server_group['members'])
+
+
+class ServerGroup264TestJSON(base.BaseV2ComputeTest):
+    """These tests check the server-group APIs with the 2.64 microversion.
+
+    These tests only verify the POST and GET server-group APIs response
+    schema with the 2.64 microversion.
+    """
+    create_default_network = True
+    min_microversion = '2.64'
+
+    @decorators.idempotent_id('b52f09dd-2133-4037-9a5d-bdb260096a88')
+    def test_create_get_server_group(self):
+        # create, get the test server-group with given policy
+        server_group = self.create_test_server_group(
+            name='server-group', policy='affinity')
+        self.addCleanup(
+            self.server_groups_client.delete_server_group,
+            server_group['id'])
+        self.server_groups_client.list_server_groups()
+        self.server_groups_client.show_server_group(server_group['id'])
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 354e3b9..716ecda 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import compute
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
@@ -112,7 +113,6 @@
 
 
 class BaseServerStableDeviceRescueTest(base.BaseV2ComputeTest):
-    create_default_network = True
 
     @classmethod
     def skip_checks(cls):
@@ -124,19 +124,31 @@
             msg = "Stable rescue not available."
             raise cls.skipException(msg)
 
+    @classmethod
+    def setup_credentials(cls):
+        cls.set_network_resources(network=True, subnet=True, router=True,
+                                  dhcp=True)
+        super(BaseServerStableDeviceRescueTest, cls).setup_credentials()
+
     def _create_server_and_rescue_image(self, hw_rescue_device=None,
                                         hw_rescue_bus=None,
-                                        block_device_mapping_v2=None):
-
-        server_id = self.create_test_server(
-            wait_until='ACTIVE')['id']
+                                        block_device_mapping_v2=None,
+                                        validatable=False,
+                                        validation_resources=None,
+                                        wait_until='ACTIVE'):
+        server = self.create_test_server(
+            wait_until=wait_until,
+            validatable=validatable,
+            validation_resources=validation_resources)
         image_id = self.create_image_from_server(
-            server_id, wait_until='ACTIVE')['id']
+            server['id'], wait_until='ACTIVE')['id']
 
         if block_device_mapping_v2:
-            server_id = self.create_test_server(
-                wait_until='ACTIVE',
-                block_device_mapping_v2=block_device_mapping_v2)['id']
+            server = self.create_test_server(
+                wait_until=wait_until,
+                validatable=validatable,
+                validation_resources=validation_resources,
+                block_device_mapping_v2=block_device_mapping_v2)
 
         if hw_rescue_bus:
             self.images_client.update_image(
@@ -146,16 +158,28 @@
             self.images_client.update_image(
                 image_id, [dict(add='/hw_rescue_device',
                                 value=hw_rescue_device)])
-        return server_id, image_id
+        return server, image_id
 
-    def _test_stable_device_rescue(self, server_id, rescue_image_id):
+    def _test_stable_device_rescue(
+            self, server, rescue_image_id,
+            validation_resources=None):
         self.servers_client.rescue_server(
-            server_id, rescue_image_ref=rescue_image_id)
+            server['id'], rescue_image_ref=rescue_image_id)
         waiters.wait_for_server_status(
-            self.servers_client, server_id, 'RESCUE')
-        self.servers_client.unrescue_server(server_id)
-        waiters.wait_for_server_status(
-            self.servers_client, server_id, 'ACTIVE')
+            self.servers_client, server['id'], 'RESCUE')
+        self.servers_client.unrescue_server(server['id'])
+        # NOTE(gmann): In the next addCleanup, server unrescue is called
+        # before the volume detach in cleanup (added by the self.attach_volume()
+        # method), so to make sure the server is ready before the detach
+        # operation we need to SSH into it; more details are in bug#1960346.
+        if validation_resources and CONF.validation.run_validation:
+            tenant_network = self.get_tenant_network()
+            compute.wait_for_ssh_or_ping(
+                server, self.os_primary, tenant_network,
+                True, validation_resources, "SSHABLE", True)
+        else:
+            waiters.wait_for_server_status(
+                self.servers_client, server['id'], 'ACTIVE')
 
 
 class ServerStableDeviceRescueTestIDE(BaseServerStableDeviceRescueTest):
@@ -172,9 +196,9 @@
                       "Aarch64 does not support ide bus for cdrom")
     def test_stable_device_rescue_cdrom_ide(self):
         """Test rescuing server with cdrom and ide as the rescue disk"""
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='cdrom', hw_rescue_bus='ide')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
 
 class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
@@ -183,23 +207,23 @@
     @decorators.idempotent_id('16865750-1417-4854-bcf7-496e6753c01e')
     def test_stable_device_rescue_disk_virtio(self):
         """Test rescuing server with disk and virtio as the rescue disk"""
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='virtio')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
     @decorators.idempotent_id('12340157-6306-4745-bdda-cfa019908b48')
     def test_stable_device_rescue_disk_scsi(self):
         """Test rescuing server with disk and scsi as the rescue disk"""
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='scsi')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
     @decorators.idempotent_id('647d04cf-ad35-4956-89ab-b05c5c16f30c')
     def test_stable_device_rescue_disk_usb(self):
         """Test rescuing server with disk and usb as the rescue disk"""
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='usb')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
     @decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
     @utils.services('volume')
@@ -209,14 +233,25 @@
         Attach a volume to the server and then rescue the server with disk
         and virtio as the rescue disk.
         """
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
-            hw_rescue_device='disk', hw_rescue_bus='virtio')
-        server = self.servers_client.show_server(server_id)['server']
+        # This test does not perform a detach operation itself, but the
+        # cleanup added by self.attach_volume() will try to detach the
+        # volume after the server is unrescued. Due to that we need to
+        # make the server SSHable before it tries to detach; more
+        # details are in bug#1960346
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        server, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='disk', hw_rescue_bus='virtio', validatable=True,
+            validation_resources=validation_resources, wait_until="SSHABLE")
+        server = self.servers_client.show_server(server['id'])['server']
         volume = self.create_volume()
         self.attach_volume(server, volume)
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'in-use')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(
+            server, rescue_image_id,
+            validation_resources=validation_resources)
 
 
 class ServerBootFromVolumeStableRescueTest(BaseServerStableDeviceRescueTest):
@@ -248,10 +283,10 @@
             "source_type": "blank",
             "volume_size": CONF.volume.volume_size,
             "destination_type": "volume"}]
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='virtio',
             block_device_mapping_v2=block_device_mapping_v2)
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
     @decorators.attr(type='slow')
     @decorators.idempotent_id('e4636333-c928-40fc-98b7-70a23eef4224')
@@ -267,7 +302,7 @@
             "volume_size": CONF.volume.volume_size,
             "uuid": CONF.compute.image_ref,
             "destination_type": "volume"}]
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='virtio',
             block_device_mapping_v2=block_device_mapping_v2)
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 9bcf062..955ba1c 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import compute
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
@@ -38,7 +39,8 @@
 
     @classmethod
     def setup_credentials(cls):
-        cls.set_network_resources(network=True, subnet=True, router=True)
+        cls.set_network_resources(network=True, subnet=True, router=True,
+                                  dhcp=True)
         super(ServerRescueNegativeTestJSON, cls).setup_credentials()
 
     @classmethod
@@ -136,21 +138,41 @@
     def test_rescued_vm_detach_volume(self):
         """Test detaching volume from a rescued server should fail"""
         volume = self.create_volume()
-
+        # This test only checks that the detach fails and does not perform
+        # the detach operation itself, but the cleanup added by
+        # self.attach_volume() will try to detach the volume after the
+        # server is unrescued. Due to that we need to make the server
+        # SSHable before it tries to detach; more details are in
+        # bug#1960346
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            adminPass=self.password,
+            wait_until="SSHABLE",
+            validatable=True,
+            validation_resources=validation_resources)
         # Attach the volume to the server
-        server = self.servers_client.show_server(self.server_id)['server']
         self.attach_volume(server, volume)
 
         # Rescue the server
-        self.servers_client.rescue_server(self.server_id,
+        self.servers_client.rescue_server(server['id'],
                                           adminPass=self.password)
         waiters.wait_for_server_status(self.servers_client,
-                                       self.server_id, 'RESCUE')
+                                       server['id'], 'RESCUE')
+        # NOTE(gmann) During cleanup the server unrescue (added below) runs
+        # before the volume detach cleanup (added by the self.attach_volume()
+        # method), so to make sure the server is ready before the detach
+        # operation we add a cleanup that performs SSH on it; more details
+        # are in bug#1960346.
+        if CONF.validation.run_validation:
+            tenant_network = self.get_tenant_network()
+            self.addCleanup(compute.wait_for_ssh_or_ping,
+                            server, self.os_primary, tenant_network,
+                            True, validation_resources, "SSHABLE", True)
         # addCleanup is a LIFO queue
-        self.addCleanup(self._unrescue, self.server_id)
+        self.addCleanup(self._unrescue, server['id'])
 
         # Detach the volume from the server expecting failure
         self.assertRaises(lib_exc.Conflict,
                           self.servers_client.detach_volume,
-                          self.server_id,
+                          server['id'],
                           volume['id'])
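
The SSH-wait cleanup above only works because addCleanup is a LIFO queue: the unrescue registered last runs first, then the SSH wait, and only then the volume detach registered earlier by self.attach_volume(). A minimal standalone sketch of that ordering, using plain unittest rather than Tempest code:

    import unittest


    class CleanupOrderExample(unittest.TestCase):
        def test_lifo_cleanup_order(self):
            order = []
            # Registered first, e.g. by attach_volume(): runs last.
            self.addCleanup(order.append, 'detach_volume')
            # Registered second: the SSH wait, runs in the middle.
            self.addCleanup(order.append, 'wait_for_ssh')
            # Registered last: unrescue, runs first.
            self.addCleanup(order.append, 'unrescue')
            self.doCleanups()
            self.assertEqual(
                ['unrescue', 'wait_for_ssh', 'detach_volume'], order)


    if __name__ == '__main__':
        unittest.main()
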
diff --git a/tempest/api/compute/servers/test_server_tags.py b/tempest/api/compute/servers/test_server_tags.py
index c988788..cdeaae5 100644
--- a/tempest/api/compute/servers/test_server_tags.py
+++ b/tempest/api/compute/servers/test_server_tags.py
@@ -14,7 +14,6 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
@@ -28,13 +27,6 @@
     create_default_network = True
 
     @classmethod
-    def skip_checks(cls):
-        super(ServerTagsTestJSON, cls).skip_checks()
-        if not utils.is_extension_enabled('os-server-tags', 'compute'):
-            msg = "os-server-tags extension is not enabled."
-            raise cls.skipException(msg)
-
-    @classmethod
     def setup_clients(cls):
         super(ServerTagsTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 3318876..034cb9e 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -16,7 +16,6 @@
 from oslo_log import log as logging
 
 from tempest.api.compute import base
-from tempest.common import utils
 from tempest import config
 from tempest.lib import decorators
 
@@ -32,24 +31,14 @@
     @decorators.idempotent_id('3bb27738-b759-4e0d-a5fa-37d7a6df07d1')
     def test_list_extensions(self):
         """Test listing compute extensions"""
-        if not CONF.compute_feature_enabled.api_extensions:
-            raise self.skipException('There are not any extensions configured')
         extensions = self.extensions_client.list_extensions()['extensions']
-        ext = CONF.compute_feature_enabled.api_extensions[0]
-
         # Log extensions list
         extension_list = [x['alias'] for x in extensions]
         LOG.debug("Nova extensions: %s", ','.join(extension_list))
 
-        if ext == 'all':
-            self.assertIn('Hosts', map(lambda x: x['name'], extensions))
-        elif ext:
-            self.assertIn(ext, extension_list)
-        else:
-            raise self.skipException('There are not any extensions configured')
+        self.assertIn('Hosts', map(lambda x: x['name'], extensions))
 
     @decorators.idempotent_id('05762f39-bdfa-4cdb-9b46-b78f8e78e2fd')
-    @utils.requires_ext(extension='os-consoles', service='compute')
     def test_get_extension(self):
         """Test getting specified compute extension details"""
         extension = self.extensions_client.show_extension('os-consoles')
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 5fe0e3b..38ca53b 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -15,20 +15,12 @@
 
 from tempest.api.compute import base
 from tempest.common import tempest_fixtures as fixtures
-from tempest.common import utils
 from tempest.lib import decorators
 
 
 class QuotasTestJSON(base.BaseV2ComputeTest):
     """Test compute quotas"""
 
-    @classmethod
-    def skip_checks(cls):
-        super(QuotasTestJSON, cls).skip_checks()
-        if not utils.is_extension_enabled('os-quota-sets', 'compute'):
-            msg = "quotas extension not enabled."
-            raise cls.skipException(msg)
-
     def setUp(self):
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
         self.useFixture(fixtures.LockFixture('compute_quotas'))
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 4c7c234..5380c67 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -49,7 +49,7 @@
         server = self.create_test_server(
             validatable=True,
             validation_resources=validation_resources,
-            wait_until='ACTIVE',
+            wait_until='SSHABLE',
             adminPass=self.image_ssh_password)
         self.addCleanup(self.delete_server, server['id'])
         # Record addresses so that we can ssh later
@@ -378,10 +378,19 @@
                   the created volume, and dict of server ID to volumeAttachment
                   dict entries
         """
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+
         servers = []
         for x in range(2):
             name = 'multiattach-server-%i' % x
-            servers.append(self.create_test_server(name=name))
+            servers.append(
+                self.create_test_server(
+                    name=name,
+                    validatable=True,
+                    validation_resources=validation_resources
+                )
+            )
 
         # Now wait for the servers to be ACTIVE.
         for server in servers:
@@ -492,7 +501,10 @@
         servers, volume, _ = self._create_and_multiattach()
 
         for server in servers:
-            self.resize_server(server['id'], self.flavor_ref_alt)
+            # We need to wait until the guest OS fully boots up as we are going
+            # to detach volumes after the resize. See bug #1960346.
+            self.resize_server(
+                server['id'], self.flavor_ref_alt, wait_until='SSHABLE')
 
         for server in servers:
             self._detach_multiattach_volume(volume['id'], server['id'])
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 516f599..43b4bf5 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.api.compute import base
+from tempest.api.compute.volumes import test_attach_volume
 from tempest import config
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
@@ -20,24 +20,15 @@
 CONF = config.CONF
 
 
-class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
+class AttachVolumeNegativeTest(test_attach_volume.BaseAttachVolumeTest):
     """Negative tests of volume attaching"""
 
-    create_default_network = True
-
-    @classmethod
-    def skip_checks(cls):
-        super(AttachVolumeNegativeTest, cls).skip_checks()
-        if not CONF.service_available.cinder:
-            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
-            raise cls.skipException(skip_msg)
-
     @decorators.attr(type=['negative'])
     @decorators.related_bug('1630783', status_code=500)
     @decorators.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
     def test_delete_attached_volume(self):
         """Test deleting attachemd volume should fail"""
-        server = self.create_test_server(wait_until='ACTIVE')
+        server, validation_resources = self._create_server()
         volume = self.create_volume()
         self.attach_volume(server, volume)
 
@@ -54,7 +45,7 @@
         depending on whether or not cinder v3.27 is being used to attach
         the volume to the instance.
         """
-        server = self.create_test_server(wait_until='ACTIVE')
+        server, validation_resources = self._create_server()
         volume = self.create_volume()
 
         self.attach_volume(server, volume)
@@ -66,12 +57,12 @@
     @decorators.idempotent_id('ee37a796-2afb-11e7-bc0f-fa163e65f5ce')
     def test_attach_attached_volume_to_different_server(self):
         """Test attaching attached volume to different server should fail"""
-        server1 = self.create_test_server(wait_until='ACTIVE')
+        server1, validation_resources = self._create_server()
         volume = self.create_volume()
 
         self.attach_volume(server1, volume)
 
         # Create server2 and attach in-use volume
-        server2 = self.create_test_server(wait_until='ACTIVE')
+        server2, validation_resources = self._create_server()
         self.assertRaises(lib_exc.BadRequest,
                           self.attach_volume, server2, volume)
diff --git a/tempest/api/image/v2/admin/test_images_metadefs_namespaces.py b/tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
index 0fe49f9..c411aa9 100644
--- a/tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
@@ -64,6 +64,7 @@
         self.assertEqual(False, body['protected'])
         # now able to delete the non-protected namespace
         self.namespaces_client.delete_namespace(namespace_name)
+        self.namespaces_client.wait_for_resource_deletion(namespace_name)
 
     def _cleanup_namespace(self, namespace_name):
         body = self.namespaces_client.show_namespace(namespace_name)
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index e75e22a..4ca7412 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -78,7 +78,7 @@
 
         hmac_body = '%s\n%s\n%s' % (method, expires, path)
         sig = hmac.new(
-            key.encode(), hmac_body.encode(), hashlib.sha1
+            key.encode(), hmac_body.encode(), hashlib.sha256
         ).hexdigest()
 
         url = "%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (container,
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 4ad8428..e5f4cf2 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -83,7 +83,7 @@
 
         hmac_body = '%s\n%s\n%s' % (method, expires, path)
         sig = hmac.new(
-            key.encode(), hmac_body.encode(), hashlib.sha1
+            key.encode(), hmac_body.encode(), hashlib.sha256
         ).hexdigest()
 
         url = "%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (container,
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
index 3c76eca..9a85ed4 100644
--- a/tempest/api/volume/admin/test_backends_capabilities.py
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -47,6 +47,11 @@
                         'volume_backend_name',
                         'storage_protocol')
 
+        # List of storage protocol variants defined in cinder.common.constants
+        # The canonical name for a storage protocol comes first in each list
+        VARIANTS = [['iSCSI', 'iscsi'], ['FC', 'fibre_channel', 'fc'],
+                    ['NFS', 'nfs'], ['NVMe-oF', 'NVMeOF', 'nvmeof']]
+
         # Get list backend capabilities using show_pools
         cinder_pools = [
             pool['capabilities'] for pool in
@@ -64,4 +69,23 @@
                                         cinder_pools)))
         observed_list = sorted(list(map(operator.itemgetter(*VOLUME_STATS),
                                         capabilities)))
+
+        # Cinder Bug #1966103: Some drivers were reporting different strings
+        # to represent the same storage protocol. For backward compatibility,
+        # the scheduler can handle the variants, but to standardize this for
+        # operators (who may need to refer to the protocol in volume-type
+        # extra-specs), the get-pools response was changed by I07d74078dbb1
+        # to only report the canonical name for a storage protocol. Thus, the
+        # expected_list (which we got from the get-pools call) will only
+        # contain canonical names, while the observed_list (which we got
+        # from the driver capabilities call) may contain a variant. So before
+        # comparing the lists, we need to look for known variants in the
+        # observed_list elements and replace them with their canonical values
+        for item in range(len(observed_list)):
+            for variants in VARIANTS:
+                if observed_list[item][2] in variants:
+                    observed_list[item] = (observed_list[item][0],
+                                           observed_list[item][1],
+                                           variants[0])
+
         self.assertEqual(expected_list, observed_list)
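
A small standalone sketch of the normalization performed above, with made-up pool data: any known variant in the observed (pool, backend, protocol) tuples is replaced by the first, canonical, spelling before the lists are compared:

    VARIANTS = [['iSCSI', 'iscsi'], ['FC', 'fibre_channel', 'fc'],
                ['NFS', 'nfs'], ['NVMe-oF', 'NVMeOF', 'nvmeof']]


    def canonicalize(observed):
        result = []
        for pool, backend, protocol in observed:
            for variants in VARIANTS:
                if protocol in variants:
                    protocol = variants[0]
                    break
            result.append((pool, backend, protocol))
        return result


    print(canonicalize([('pool1', 'lvm', 'iscsi'), ('pool2', 'gold', 'nfs')]))
    # [('pool1', 'lvm', 'iSCSI'), ('pool2', 'gold', 'NFS')]
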
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index 8154682..406af27 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -71,3 +71,39 @@
             self.admin_group_types_client.list_group_types()['group_types'])
         group_ids = [it['id'] for it in group_list]
         self.assertNotIn(body['id'], group_ids)
+
+    @decorators.idempotent_id('3d5e5cec-72b4-4511-b135-7cc2b7a053ae')
+    def test_group_type_list_by_optional_params(self):
+        """Test list group type sort/public"""
+        type_a_name = "a_{}".format(data_utils.rand_name('group-type'))
+        type_b_name = "b_{}".format(data_utils.rand_name('group-type'))
+        self.create_group_type(name=type_a_name, is_public=True)
+        self.create_group_type(name=type_b_name, is_public=False)
+
+        group_list = (
+            self.admin_group_types_client.list_group_types(
+                sort="name:asc", is_public=None)['group_types'])
+        name_list = [it['name'] for it in group_list]
+        self.assertLess(
+            name_list.index(type_a_name), name_list.index(type_b_name))
+
+        group_list = (
+            self.admin_group_types_client.list_group_types(
+                sort="name:desc", is_public=None)['group_types'])
+        name_list = [it['name'] for it in group_list]
+        self.assertLess(name_list.index(type_b_name),
+                        name_list.index(type_a_name))
+
+        group_list = (
+            self.admin_group_types_client.list_group_types(
+                is_public=False)['group_types'])
+        name_list = [it['name'] for it in group_list]
+        self.assertNotIn(type_a_name, name_list)
+        self.assertIn(type_b_name, name_list)
+
+        group_list = (
+            self.admin_group_types_client.list_group_types(
+                is_public=True)['group_types'])
+        name_list = [it['name'] for it in group_list]
+        self.assertNotIn(type_b_name, name_list)
+        self.assertIn(type_a_name, name_list)
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index b90b5bb..172b6ed 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -98,7 +98,7 @@
     def create_volume(cls, wait_until='available', **kwargs):
         """Wrapper utility that returns a test volume.
 
-           :param wait_until: wait till volume status.
+           :param wait_until: volume status to wait for; None means no wait.
         """
         if 'size' not in kwargs:
             kwargs['size'] = CONF.volume.volume_size
@@ -127,8 +127,9 @@
         cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
                                     cls.delete_volume, cls.volumes_client,
                                     volume['id'])
-        waiters.wait_for_volume_resource_status(cls.volumes_client,
-                                                volume['id'], wait_until)
+        if wait_until:
+            waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                    volume['id'], wait_until)
         return volume
 
     @classmethod
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 0db1ab1..3d476b9 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -130,7 +130,7 @@
             msg = ('Glance is available in the catalog, but no known version, '
                    '(v1.x or v2.x) of Glance could be found, so Glance should '
                    'be configured as not available')
-            LOG.warn(msg)
+            LOG.warning(msg)
             print_and_or_update('glance', 'service-available', False, update)
             return
 
@@ -214,7 +214,6 @@
 
 def get_extension_client(os, service):
     extensions_client = {
-        'nova': os.compute.ExtensionsClient(),
         'neutron': os.network.ExtensionsClient(),
         'swift': os.object_storage.CapabilitiesClient(),
         # NOTE: Cinder v3 API is current and v2 and v1 are deprecated.
@@ -231,7 +230,6 @@
 
 def get_enabled_extensions(service):
     extensions_options = {
-        'nova': CONF.compute_feature_enabled.api_extensions,
         'cinder': CONF.volume_feature_enabled.api_extensions,
         'neutron': CONF.network_feature_enabled.api_extensions,
         'swift': CONF.object_storage_feature_enabled.discoverable_apis,
@@ -442,7 +440,7 @@
         os = clients.Manager(icreds.get_primary_creds().credentials)
         services = check_service_availability(os, update)
         results = {}
-        for service in ['nova', 'cinder', 'neutron', 'swift']:
+        for service in ['cinder', 'neutron', 'swift']:
             if service not in services:
                 continue
             results = verify_extensions(os, service, results)
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 2443a67..eb7e366 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -15,7 +15,7 @@
 
 import base64
 import socket
-import ssl
+from ssl import SSLContext as sslc
 import struct
 import textwrap
 from urllib import parse as urlparse
@@ -23,11 +23,14 @@
 from oslo_log import log as logging
 from oslo_utils import excutils
 
+from tempest.common.utils.linux import remote_client
 from tempest.common import waiters
 from tempest import config
+from tempest import exceptions
 from tempest.lib.common import fixed_network
 from tempest.lib.common import rest_client
 from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
 
 CONF = config.CONF
 
@@ -54,10 +57,104 @@
     return False
 
 
+def get_server_ip(server, validation_resources=None):
+    """Get the server fixed or floating IP.
+
+    Based on the configuration we're in, return a correct ip
+    address for validating that a guest is up.
+
+    :param server: The server dict as returned by the API
+    :param validation_resources: The dict of validation resources
+        provisioned for the server.
+    """
+    if CONF.validation.connect_method == 'floating':
+        if validation_resources:
+            return validation_resources['floating_ip']['ip']
+        else:
+            msg = ('When validation.connect_method equals floating, '
+                   'validation_resources cannot be None')
+            raise lib_exc.InvalidParam(invalid_param=msg)
+    elif CONF.validation.connect_method == 'fixed':
+        addresses = server['addresses'][CONF.validation.network_for_ssh]
+        for address in addresses:
+            if address['version'] == CONF.validation.ip_version_for_ssh:
+                return address['addr']
+        raise exceptions.ServerUnreachable(server_id=server['id'])
+    else:
+        raise lib_exc.InvalidConfiguration()
+
+
+def _setup_validation_fip(
+        server, clients, tenant_network, validation_resources):
+    if CONF.service_available.neutron:
+        ifaces = clients.interfaces_client.list_interfaces(server['id'])
+        validation_port = None
+        for iface in ifaces['interfaceAttachments']:
+            if iface['net_id'] == tenant_network['id']:
+                validation_port = iface['port_id']
+                break
+        if not validation_port:
+            # NOTE(artom) This will get caught by the catch-all clause in
+            # the wait_until loop in create_test_server()
+            raise ValueError('Unable to setup floating IP for validation: '
+                             'port not found on tenant network')
+        clients.floating_ips_client.update_floatingip(
+            validation_resources['floating_ip']['id'],
+            port_id=validation_port)
+    else:
+        fip_client = clients.compute_floating_ips_client
+        fip_client.associate_floating_ip_to_server(
+            floating_ip=validation_resources['floating_ip']['ip'],
+            server_id=server['id'])
+
+
+def wait_for_ssh_or_ping(server, clients, tenant_network,
+                         validatable, validation_resources, wait_until,
+                         set_floatingip):
+    """Wait for the server for SSH or Ping as requested.
+
+    :param server: The server dict as returned by the API
+    :param clients: Client manager which provides OpenStack Tempest clients.
+    :param tenant_network: Tenant network to be used for creating a server.
+    :param validatable: Whether the server will be pingable or sshable.
+    :param validation_resources: Resources created for the connection to the
+        server. Include a keypair, a security group and an IP.
+    :param wait_until: Server status to wait for the server to reach.
+        It can be PINGABLE or SSHABLE when the server is both
+        validatable and has the required validation_resources provided.
+    :param set_floatingip: Whether a floating IP needs to be associated
+        with the server.
+    """
+    if set_floatingip and CONF.validation.connect_method == 'floating':
+        _setup_validation_fip(
+            server, clients, tenant_network, validation_resources)
+
+    server_ip = get_server_ip(
+        server, validation_resources=validation_resources)
+    if wait_until == 'PINGABLE':
+        waiters.wait_for_ping(
+            server_ip,
+            clients.servers_client.build_timeout,
+            clients.servers_client.build_interval
+        )
+    if wait_until == 'SSHABLE':
+        pkey = validation_resources['keypair']['private_key']
+        ssh_client = remote_client.RemoteClient(
+            server_ip,
+            CONF.validation.image_ssh_user,
+            pkey=pkey,
+            server=server,
+            servers_client=clients.servers_client
+        )
+        waiters.wait_for_ssh(
+            ssh_client,
+            clients.servers_client.build_timeout
+        )
+
+
 def create_test_server(clients, validatable=False, validation_resources=None,
                        tenant_network=None, wait_until=None,
                        volume_backed=False, name=None, flavor=None,
-                       image_id=None, wait_for_sshable=True, **kwargs):
+                       image_id=None, **kwargs):
     """Common wrapper utility returning a test server.
 
     This method is a common wrapper returning a test server that can be
@@ -69,7 +166,9 @@
         server. Include a keypair, a security group and an IP.
     :param tenant_network: Tenant network to be used for creating a server.
     :param wait_until: Server status to wait for the server to reach after
-        its creation.
+        its creation. Additionally, PINGABLE and SSHABLE states are
+        accepted when the server is both validatable and has the required
+        validation_resources provided.
     :param volume_backed: Whether the server is volume backed or not.
         If this is true, a volume will be created and create server will be
         requested with 'block_device_mapping_v2' populated with below values:
@@ -93,13 +192,9 @@
         CONF.compute.flavor_ref will be used instead.
     :param image_id: ID of the image to be used to provision the server. If not
         defined, CONF.compute.image_ref will be used instead.
-    :param wait_for_sshable: Check server's console log and wait until it will
-        be ready to login.
     :returns: a tuple
     """
 
-    # TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE
-
     if name is None:
         name = data_utils.rand_name(__name__ + "-instance")
     if flavor is None:
@@ -209,40 +304,31 @@
         body = rest_client.ResponseBody(body.response, body['server'])
         servers = [body]
 
-    def _setup_validation_fip():
-        if CONF.service_available.neutron:
-            ifaces = clients.interfaces_client.list_interfaces(server['id'])
-            validation_port = None
-            for iface in ifaces['interfaceAttachments']:
-                if iface['net_id'] == tenant_network['id']:
-                    validation_port = iface['port_id']
-                    break
-            if not validation_port:
-                # NOTE(artom) This will get caught by the catch-all clause in
-                # the wait_until loop below
-                raise ValueError('Unable to setup floating IP for validation: '
-                                 'port not found on tenant network')
-            clients.floating_ips_client.update_floatingip(
-                validation_resources['floating_ip']['id'],
-                port_id=validation_port)
-        else:
-            fip_client = clients.compute_floating_ips_client
-            fip_client.associate_floating_ip_to_server(
-                floating_ip=validation_resources['floating_ip']['ip'],
-                server_id=servers[0]['id'])
-
     if wait_until:
+
+        # NOTE(lyarwood): PINGABLE and SSHABLE both require the instance to
+        # go ACTIVE initially before we can setup the fip(s) etc so stash
+        # this additional wait state for later use.
+        wait_until_extra = None
+        if wait_until in ['PINGABLE', 'SSHABLE']:
+            wait_until_extra = wait_until
+            wait_until = 'ACTIVE'
+
         for server in servers:
             try:
                 waiters.wait_for_server_status(
                     clients.servers_client, server['id'], wait_until,
                     request_id=request_id)
-
-                # Multiple validatable servers are not supported for now. Their
-                # creation will fail with the condition above.
                 if CONF.validation.run_validation and validatable:
                     if CONF.validation.connect_method == 'floating':
-                        _setup_validation_fip()
+                        _setup_validation_fip(
+                            server, clients, tenant_network,
+                            validation_resources)
+                    if wait_until_extra:
+                        wait_for_ssh_or_ping(
+                            server, clients, tenant_network,
+                            validatable, validation_resources,
+                            wait_until_extra, False)
 
             except Exception:
                 with excutils.save_and_reraise_exception():
@@ -267,10 +353,6 @@
                             LOG.exception('Server %s failed to delete in time',
                                           server['id'])
 
-    if (validatable and CONF.compute_feature_enabled.console_output and
-            wait_for_sshable):
-        waiters.wait_for_guest_os_boot(clients.servers_client, server['id'])
-
     return body, servers
 
 
@@ -313,7 +395,8 @@
         af, socktype, proto, _, sa = res
         client_socket = socket.socket(af, socktype, proto)
         if url.scheme == 'https':
-            client_socket = ssl.wrap_socket(client_socket)
+            client_socket = sslc().wrap_socket(client_socket,
+                                               server_hostname=url.hostname)
         client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
         try:
             client_socket.connect(sa)
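
A hedged usage sketch of the new wait_for_ssh_or_ping() helper from a compute test, mirroring how the rescue tests above call it; the example class name is an assumption, while the base class, get_tenant_network() and os_primary come from Tempest's compute base test classes:

    from tempest.api.compute import base
    from tempest.common import compute
    from tempest import config

    CONF = config.CONF


    class ExampleSshableTest(base.BaseV2ComputeTest):  # assumed example class

        def _wait_until_sshable(self, server, validation_resources):
            # Only meaningful when validation is enabled in tempest.conf.
            if not CONF.validation.run_validation:
                return
            tenant_network = self.get_tenant_network()
            compute.wait_for_ssh_or_ping(
                server, self.os_primary, tenant_network,
                validatable=True,
                validation_resources=validation_resources,
                wait_until='SSHABLE',
                set_floatingip=True)
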
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 88a16b7..0fa5ce4 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -96,7 +96,6 @@
 
     """
     config_dict = {
-        'compute': CONF.compute_feature_enabled.api_extensions,
         'volume': CONF.volume_feature_enabled.api_extensions,
         'network': CONF.network_feature_enabled.api_extensions,
         'object': CONF.object_storage_feature_enabled.discoverable_apis,
diff --git a/tempest/common/utils/net_downtime.py b/tempest/common/utils/net_downtime.py
new file mode 100644
index 0000000..9675ec8
--- /dev/null
+++ b/tempest/common/utils/net_downtime.py
@@ -0,0 +1,63 @@
+# Copyright 2022 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import signal
+import subprocess
+
+import fixtures
+
+from oslo_log import log
+
+
+LOG = log.getLogger(__name__)
+
+
+class NetDowntimeMeter(fixtures.Fixture):
+    def __init__(self, dest_ip, interval='0.2'):
+        self.dest_ip = dest_ip
+        # Note: for intervals lower than 0.2 ping requires root privileges
+        self.interval = interval
+        self.ping_process = None
+
+    def _setUp(self):
+        self.start_background_pinger()
+
+    def start_background_pinger(self):
+        cmd = ['ping', '-q', '-s1']
+        cmd.append('-i{}'.format(self.interval))
+        cmd.append(self.dest_ip)
+        LOG.debug("Starting background pinger to '{}' with interval {}".format(
+            self.dest_ip, self.interval))
+        self.ping_process = subprocess.Popen(
+            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        self.addCleanup(self.cleanup)
+
+    def cleanup(self):
+        if self.ping_process and self.ping_process.poll() is None:
+            LOG.debug('Terminating background pinger with pid {}'.format(
+                self.ping_process.pid))
+            self.ping_process.terminate()
+        self.ping_process = None
+
+    def get_downtime(self):
+        self.ping_process.send_signal(signal.SIGQUIT)
+        # Example of the expected output:
+        # 264/274 packets, 3% loss
+        output = self.ping_process.stderr.readline().strip().decode('utf-8')
+        if output and len(output.split()[0].split('/')) == 2:
+            succ, total = output.split()[0].split('/')
+            return (int(total) - int(succ)) * float(self.interval)
+        else:
+            LOG.warning('Unexpected output obtained from the pinger: %s',
+                        output)
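
A hedged sketch of driving the new fixture directly, assuming a Linux iputils ping and a reachable address; in a live-migration test it would normally be installed with self.useFixture() and queried after the migration completes:

    import time

    from tempest.common.utils import net_downtime

    meter = net_downtime.NetDowntimeMeter('127.0.0.1', interval='0.2')
    meter.setUp()        # fixtures.Fixture entry point, starts the pinger
    time.sleep(2)        # stand-in for the operation being measured
    print('measured downtime: %s seconds' % meter.get_downtime())
    meter.cleanUp()      # terminates the background pinger
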
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 21d0109..ab401fb 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -10,6 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import os
 import re
 import time
 
@@ -530,23 +531,6 @@
             raise lib_exc.TimeoutException(message)
 
 
-def wait_for_guest_os_boot(client, server_id):
-    start_time = int(time.time())
-    while True:
-        console_output = client.get_console_output(server_id)['output']
-        for line in console_output.split('\n'):
-            if 'login:' in line.lower():
-                return
-        if int(time.time()) - start_time >= client.build_timeout:
-            LOG.info("Guest OS on server %s probably isn't ready or its "
-                     "console log can't be parsed properly. If guest OS "
-                     "isn't ready, that may cause problems with SSH to "
-                     "the server.",
-                     server_id)
-            return
-        time.sleep(client.build_interval)
-
-
 def wait_for_server_floating_ip(servers_client, server, floating_ip,
                                 wait_for_disassociate=False):
     """Wait for floating IP association or disassociation.
@@ -587,3 +571,26 @@
                        'in time.' % (floating_ip, server['id']))
             raise lib_exc.TimeoutException(msg)
         time.sleep(servers_client.build_interval)
+
+
+def wait_for_ping(server_ip, timeout=30, interval=1):
+    """Waits for an address to become pingable"""
+    start_time = int(time.time())
+    while int(time.time()) - start_time < timeout:
+        response = os.system("ping -c 1 " + server_ip)
+        if response == 0:
+            return
+        time.sleep(interval)
+    raise lib_exc.TimeoutException()
+
+
+def wait_for_ssh(ssh_client, timeout=30):
+    """Waits for SSH connection to become usable"""
+    start_time = int(time.time())
+    while int(time.time()) - start_time < timeout:
+        try:
+            ssh_client.validate_authentication()
+            return
+        except lib_exc.SSHTimeout:
+            pass
+    raise lib_exc.TimeoutException()
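
A quick standalone use of the new ping waiter against the loopback address; the timeout values are arbitrary examples, and wait_for_ssh works the same way but needs a configured RemoteClient:

    from tempest.common import waiters

    # Raises a TimeoutException if 127.0.0.1 does not answer ping within 10s.
    waiters.wait_for_ping('127.0.0.1', timeout=10, interval=1)
    print('127.0.0.1 became pingable')
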
diff --git a/tempest/config.py b/tempest/config.py
index 03ddbf5..4098f32 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -450,18 +450,6 @@
                      "the '.' with '-' to comply with fqdn hostname. Nova "
                      "changed that in Wallaby cycle, if your cloud is older "
                      "than wallaby then you can keep/make it False."),
-    cfg.ListOpt('api_extensions',
-                default=['all'],
-                help='A list of enabled compute extensions with a special '
-                     'entry all which indicates every extension is enabled. '
-                     'Each extension should be specified with alias name. '
-                     'Empty list indicates all extensions are disabled',
-                     deprecated_for_removal=True,
-                     deprecated_reason='The Nova extensions API and mechanism '
-                                       'is deprecated. This option will be '
-                                       'removed when all releases supported '
-                                       'by tempest no longer contain the Nova '
-                                       'extensions API and mechanism.'),
     cfg.BoolOpt('change_password',
                 default=False,
                 help="Does the test environment support changing the admin "
@@ -653,6 +641,9 @@
                 default=True,
                 help='Does the test environment support attaching devices '
                      'using an IDE bus to the instance?'),
+    cfg.BoolOpt('unified_limits',
+                default=False,
+                help='Does the test environment support unified limits?'),
 ]
 
 
@@ -873,7 +864,7 @@
     cfg.StrOpt('qos_placement_physnet', default=None,
                help='Name of the physnet for placement based minimum '
                     'bandwidth allocation.'),
-    cfg.StrOpt('provider_net_base_segmentation_id', default=3000,
+    cfg.StrOpt('provider_net_base_segmentation_id', default='3000',
                help='Base segmentation ID to create provider networks. '
                     'This value will be increased in case of conflict.'),
     cfg.BoolOpt('qos_min_bw_and_pps', default=False,
@@ -971,9 +962,15 @@
                help="Network used for SSH connections. Ignored if "
                     "connect_method=floating."),
     cfg.StrOpt('ssh_key_type',
-               default='rsa',
+               default='ecdsa',
                help='Type of key to use for ssh connections. '
                     'Valid types are rsa, ecdsa'),
+    cfg.FloatOpt('allowed_network_downtime',
+                 default=5.0,
+                 help="Allowed VM network connection downtime during live "
+                      "migration, in seconds. "
+                      "When the measured downtime exceeds this value, an "
+                      "exception is raised."),
 ]
 
 volume_group = cfg.OptGroup(name='volume',
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index c1e6b2d..1c9c55b 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -318,3 +318,16 @@
                        " to all negative API tests"
                 )
             _HAVE_NEGATIVE_DECORATOR = False
+
+
+@core.flake8ext
+def no_log_warn(logical_line):
+    """Disallow 'LOG.warn('
+
+    Use LOG.warning() instead of the deprecated LOG.warn().
+    https://docs.python.org/3/library/logging.html#logging.warning
+    """
+
+    msg = ("T118: LOG.warn is deprecated, please use LOG.warning!")
+    if "LOG.warn(" in logical_line:
+        yield (0, msg)
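
A quick interactive sketch of what the new T118 check flags, calling the checker directly on example logical lines (the strings are made up; importing tempest.hacking.checks also requires the hacking plugin package it depends on):

    from tempest.hacking import checks

    print(list(checks.no_log_warn('LOG.warn("something happened")')))
    # [(0, 'T118: LOG.warn is deprecated, please use LOG.warning!')]
    print(list(checks.no_log_warn('LOG.warning("something happened")')))
    # []
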
diff --git a/tempest/lib/api_schema/response/compute/v2_1/servers.py b/tempest/lib/api_schema/response/compute/v2_1/servers.py
index 3300298..bd42afd 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/servers.py
@@ -506,6 +506,10 @@
     }
 }
 
+create_backup = {
+    'status_code': [202]
+}
+
 server_actions_common_schema = {
     'status_code': [202]
 }
diff --git a/tempest/lib/api_schema/response/compute/v2_16/servers.py b/tempest/lib/api_schema/response/compute/v2_16/servers.py
index dcd64cf..2b3ce38 100644
--- a/tempest/lib/api_schema/response/compute/v2_16/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_16/servers.py
@@ -172,3 +172,4 @@
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers.show_instance_action)
+create_backup = copy.deepcopy(servers.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_19/servers.py b/tempest/lib/api_schema/response/compute/v2_19/servers.py
index 0e4bd5c..ba3d787 100644
--- a/tempest/lib/api_schema/response/compute/v2_19/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_19/servers.py
@@ -62,3 +62,4 @@
 show_volume_attachment = copy.deepcopy(serversv216.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(serversv216.list_volume_attachments)
 show_instance_action = copy.deepcopy(serversv216.show_instance_action)
+create_backup = copy.deepcopy(serversv216.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_26/servers.py b/tempest/lib/api_schema/response/compute/v2_26/servers.py
index 74c08f1..123eb72 100644
--- a/tempest/lib/api_schema/response/compute/v2_26/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_26/servers.py
@@ -105,3 +105,4 @@
 show_volume_attachment = copy.deepcopy(servers219.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers219.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers219.show_instance_action)
+create_backup = copy.deepcopy(servers219.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_3/servers.py b/tempest/lib/api_schema/response/compute/v2_3/servers.py
index 435e3ac..d19f1ad 100644
--- a/tempest/lib/api_schema/response/compute/v2_3/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_3/servers.py
@@ -177,3 +177,4 @@
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers.show_instance_action)
+create_backup = copy.deepcopy(servers.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_45/servers.py b/tempest/lib/api_schema/response/compute/v2_45/servers.py
new file mode 100644
index 0000000..cb0fc13
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_45/servers.py
@@ -0,0 +1,49 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_26 import servers as servers226
+
+create_backup = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'image_id': {'type': 'string', 'format': 'uuid'}
+        },
+        'additionalProperties': False,
+        'required': ['image_id']
+    }
+}
+# NOTE(gmann): Below are the unchanged schema in this microversion. We
+# need to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.26 ***
+get_server = copy.deepcopy(servers226.get_server)
+list_servers_detail = copy.deepcopy(servers226.list_servers_detail)
+update_server = copy.deepcopy(servers226.update_server)
+rebuild_server = copy.deepcopy(servers226.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers226.rebuild_server_with_admin_pass)
+show_server_diagnostics = copy.deepcopy(servers226.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers226.get_remote_consoles)
+list_tags = copy.deepcopy(servers226.list_tags)
+update_all_tags = copy.deepcopy(servers226.update_all_tags)
+delete_all_tags = copy.deepcopy(servers226.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers226.check_tag_existence)
+update_tag = copy.deepcopy(servers226.update_tag)
+delete_tag = copy.deepcopy(servers226.delete_tag)
+list_servers = copy.deepcopy(servers226.list_servers)
+attach_volume = copy.deepcopy(servers226.attach_volume)
+show_volume_attachment = copy.deepcopy(servers226.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers226.list_volume_attachments)
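
A hedged standalone sketch validating a made-up 2.45 create_backup response body against the new schema with python-jsonschema, which is essentially what Tempest's rest_client does for response validation; the UUID below is an invented example:

    import jsonschema

    from tempest.lib.api_schema.response.compute.v2_45 import servers

    body = {'image_id': 'c5b0fa6b-5f86-4f04-9a86-1e2f2b41b896'}  # made-up UUID
    jsonschema.validate(body, servers.create_backup['response_body'])
    print('create_backup response matches the 2.45 schema')
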
diff --git a/tempest/lib/api_schema/response/compute/v2_47/servers.py b/tempest/lib/api_schema/response/compute/v2_47/servers.py
index 7050602..1399c2d 100644
--- a/tempest/lib/api_schema/response/compute/v2_47/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_47/servers.py
@@ -13,6 +13,7 @@
 import copy
 
 from tempest.lib.api_schema.response.compute.v2_26 import servers as servers226
+from tempest.lib.api_schema.response.compute.v2_45 import servers as servers245
 
 flavor = {
     'type': 'object',
@@ -34,39 +35,40 @@
     'required': ['original_name', 'disk', 'ephemeral', 'ram', 'swap', 'vcpus']
 }
 
-get_server = copy.deepcopy(servers226.get_server)
+get_server = copy.deepcopy(servers245.get_server)
 get_server['response_body']['properties']['server'][
     'properties'].update({'flavor': flavor})
-list_servers_detail = copy.deepcopy(servers226.list_servers_detail)
+list_servers_detail = copy.deepcopy(servers245.list_servers_detail)
 list_servers_detail['response_body']['properties']['servers']['items'][
     'properties'].update({'flavor': flavor})
 
-update_server = copy.deepcopy(servers226.update_server)
+update_server = copy.deepcopy(servers245.update_server)
 update_server['response_body']['properties']['server'][
     'properties'].update({'flavor': flavor})
 
-rebuild_server = copy.deepcopy(servers226.rebuild_server)
+rebuild_server = copy.deepcopy(servers245.rebuild_server)
 rebuild_server['response_body']['properties']['server'][
     'properties'].update({'flavor': flavor})
 
 rebuild_server_with_admin_pass = copy.deepcopy(
-    servers226.rebuild_server_with_admin_pass)
+    servers245.rebuild_server_with_admin_pass)
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
     'properties'].update({'flavor': flavor})
 
 # NOTE(zhufl): Below are the unchanged schema in this microversion. We need
 # to keep this schema in this file to have the generic way to select the
 # right schema based on self.schema_versions_info mapping in service client.
-show_server_diagnostics = copy.deepcopy(servers226.show_server_diagnostics)
-get_remote_consoles = copy.deepcopy(servers226.get_remote_consoles)
-list_tags = copy.deepcopy(servers226.list_tags)
-update_all_tags = copy.deepcopy(servers226.update_all_tags)
-delete_all_tags = copy.deepcopy(servers226.delete_all_tags)
-check_tag_existence = copy.deepcopy(servers226.check_tag_existence)
-update_tag = copy.deepcopy(servers226.update_tag)
-delete_tag = copy.deepcopy(servers226.delete_tag)
-list_servers = copy.deepcopy(servers226.list_servers)
-attach_volume = copy.deepcopy(servers226.attach_volume)
-show_volume_attachment = copy.deepcopy(servers226.show_volume_attachment)
-list_volume_attachments = copy.deepcopy(servers226.list_volume_attachments)
+show_server_diagnostics = copy.deepcopy(servers245.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers245.get_remote_consoles)
+list_tags = copy.deepcopy(servers245.list_tags)
+update_all_tags = copy.deepcopy(servers245.update_all_tags)
+delete_all_tags = copy.deepcopy(servers245.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers245.check_tag_existence)
+update_tag = copy.deepcopy(servers245.update_tag)
+delete_tag = copy.deepcopy(servers245.delete_tag)
+list_servers = copy.deepcopy(servers245.list_servers)
+attach_volume = copy.deepcopy(servers245.attach_volume)
+show_volume_attachment = copy.deepcopy(servers245.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers245.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers226.show_instance_action)
+create_backup = copy.deepcopy(servers245.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_48/servers.py b/tempest/lib/api_schema/response/compute/v2_48/servers.py
index af6344b..5b53906 100644
--- a/tempest/lib/api_schema/response/compute/v2_48/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_48/servers.py
@@ -133,3 +133,4 @@
 show_volume_attachment = copy.deepcopy(servers247.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers247.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers247.show_instance_action)
+create_backup = copy.deepcopy(servers247.create_backup)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/compute/v2_50/__init__.py
similarity index 100%
rename from tempest/services/__init__.py
rename to tempest/lib/api_schema/response/compute/v2_50/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_50/quota_classes.py b/tempest/lib/api_schema/response/compute/v2_50/quota_classes.py
new file mode 100644
index 0000000..4ee845f
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_50/quota_classes.py
@@ -0,0 +1,48 @@
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+from tempest.lib.api_schema.response.compute.v2_1 import quota_classes \
+    as quota_classesv21
+
+# Compute microversion 2.50:
+# 1. fixed_ips, floating_ips, security_group_rules and security_groups
+#    are removed from:
+#      * GET /os-quota-class-sets/{id}
+#      * PUT /os-quota-class-sets/{id}
+# 2. server_groups and server_group_members are added to:
+#      * GET /os-quota-class-sets/{id}
+#      * PUT /os-quota-class-sets/{id}
+
+get_quota_class_set = copy.deepcopy(quota_classesv21.get_quota_class_set)
+update_quota_class_set = copy.deepcopy(quota_classesv21.update_quota_class_set)
+for field in ['fixed_ips', 'floating_ips', 'security_group_rules',
+              'security_groups']:
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].pop(field, None)
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'required'].remove(field)
+    update_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].pop(field, None)
+    update_quota_class_set['response_body']['properties'][
+        'quota_class_set']['required'].remove(field)
+for field in ['server_groups', 'server_group_members']:
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].update({field: {'type': 'integer'}})
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'required'].append(field)
+    update_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].update({field: {'type': 'integer'}})
+    update_quota_class_set['response_body']['properties']['quota_class_set'][
+        'required'].append(field)
diff --git a/tempest/lib/api_schema/response/compute/v2_51/servers.py b/tempest/lib/api_schema/response/compute/v2_51/servers.py
index e603287..50d6aaa 100644
--- a/tempest/lib/api_schema/response/compute/v2_51/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_51/servers.py
@@ -40,3 +40,4 @@
 attach_volume = copy.deepcopy(servers248.attach_volume)
 show_volume_attachment = copy.deepcopy(servers248.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers248.list_volume_attachments)
+create_backup = copy.deepcopy(servers248.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_54/servers.py b/tempest/lib/api_schema/response/compute/v2_54/servers.py
index 135b381..9de3016 100644
--- a/tempest/lib/api_schema/response/compute/v2_54/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_54/servers.py
@@ -59,3 +59,4 @@
 show_volume_attachment = copy.deepcopy(servers251.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers251.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers251.show_instance_action)
+create_backup = copy.deepcopy(servers251.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_57/quota_classes.py b/tempest/lib/api_schema/response/compute/v2_57/quota_classes.py
new file mode 100644
index 0000000..396ed66
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_57/quota_classes.py
@@ -0,0 +1,37 @@
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+from tempest.lib.api_schema.response.compute.v2_50 import quota_classes \
+    as quota_classesv250
+
+# Compute microversion 2.57:
+# 1. injected_file_content_bytes, injected_file_path_bytes, injected_files
+#    are removed from:
+#      * GET /os-quota-class-sets/{id}
+#      * PUT /os-quota-class-sets/{id}
+
+get_quota_class_set = copy.deepcopy(quota_classesv250.get_quota_class_set)
+update_quota_class_set = copy.deepcopy(
+    quota_classesv250.update_quota_class_set)
+for field in ['injected_file_content_bytes', 'injected_file_path_bytes',
+              'injected_files']:
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].pop(field, None)
+    get_quota_class_set['response_body']['properties']['quota_class_set'][
+        'required'].remove(field)
+    update_quota_class_set['response_body']['properties']['quota_class_set'][
+        'properties'].pop(field, None)
+    update_quota_class_set['response_body']['properties'][
+        'quota_class_set']['required'].remove(field)
diff --git a/tempest/lib/api_schema/response/compute/v2_57/servers.py b/tempest/lib/api_schema/response/compute/v2_57/servers.py
index bdff74b..ee91391 100644
--- a/tempest/lib/api_schema/response/compute/v2_57/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_57/servers.py
@@ -63,3 +63,4 @@
 show_volume_attachment = copy.deepcopy(servers254.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers254.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers254.show_instance_action)
+create_backup = copy.deepcopy(servers254.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_58/servers.py b/tempest/lib/api_schema/response/compute/v2_58/servers.py
index 62239cf..637b765 100644
--- a/tempest/lib/api_schema/response/compute/v2_58/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_58/servers.py
@@ -42,3 +42,4 @@
 attach_volume = copy.deepcopy(servers257.attach_volume)
 show_volume_attachment = copy.deepcopy(servers257.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers257.list_volume_attachments)
+create_backup = copy.deepcopy(servers257.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_6/servers.py b/tempest/lib/api_schema/response/compute/v2_6/servers.py
index 6103b7c..e6b2c32 100644
--- a/tempest/lib/api_schema/response/compute/v2_6/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_6/servers.py
@@ -32,6 +32,7 @@
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers.show_instance_action)
+create_backup = copy.deepcopy(servers.create_backup)
 
 # NOTE: The consolidated remote console API got introduced with v2.6
 # with bp/consolidate-console-api. See Nova commit 578bafeda
diff --git a/tempest/lib/api_schema/response/compute/v2_62/servers.py b/tempest/lib/api_schema/response/compute/v2_62/servers.py
index 23eebbb..d761fe9 100644
--- a/tempest/lib/api_schema/response/compute/v2_62/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_62/servers.py
@@ -45,3 +45,4 @@
 attach_volume = copy.deepcopy(servers258.attach_volume)
 show_volume_attachment = copy.deepcopy(servers258.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers258.list_volume_attachments)
+create_backup = copy.deepcopy(servers258.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_63/servers.py b/tempest/lib/api_schema/response/compute/v2_63/servers.py
index db713b1..865b4fd 100644
--- a/tempest/lib/api_schema/response/compute/v2_63/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_63/servers.py
@@ -77,3 +77,4 @@
 show_volume_attachment = copy.deepcopy(servers262.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers262.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers262.show_instance_action)
+create_backup = copy.deepcopy(servers262.create_backup)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/compute/v2_64/__init__.py
similarity index 100%
copy from tempest/services/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_64/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_64/server_groups.py b/tempest/lib/api_schema/response/compute/v2_64/server_groups.py
new file mode 100644
index 0000000..1402de5
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_64/server_groups.py
@@ -0,0 +1,56 @@
+# Copyright 2020 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_13 import server_groups as \
+    server_groupsv213
+
+# Compute microversion 2.64:
+# 1. change policies to policy in:
+#   * GET /os-server-groups
+#   * POST /os-server-groups
+#   * GET /os-server-groups/{server_group_id}
+# 2. add rules in:
+#   * GET /os-server-groups
+#   * POST /os-server-groups
+#   * GET /os-server-groups/{server_group_id}
+# 3. remove metadata from:
+#   * GET /os-server-groups
+#   * POST /os-server-groups
+#   * GET /os-server-groups/{server_group_id}
+
+common_server_group = copy.deepcopy(server_groupsv213.common_server_group)
+common_server_group['properties']['policy'] = {'type': 'string'}
+common_server_group['properties']['rules'] = {'type': 'object'}
+common_server_group['properties'].pop('policies')
+common_server_group['properties'].pop('metadata')
+common_server_group['required'].append('policy')
+common_server_group['required'].append('rules')
+common_server_group['required'].remove('policies')
+common_server_group['required'].remove('metadata')
+
+create_show_server_group = copy.deepcopy(
+    server_groupsv213.create_show_server_group)
+create_show_server_group['response_body']['properties'][
+    'server_group'] = common_server_group
+
+list_server_groups = copy.deepcopy(server_groupsv213.list_server_groups)
+list_server_groups['response_body']['properties']['server_groups'][
+    'items'] = common_server_group
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+delete_server_group = copy.deepcopy(server_groupsv213.delete_server_group)
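For reference, a server group body at microversion 2.64 looks roughly like the dict below. Only the documented differences (string 'policy', object 'rules', no 'policies'/'metadata') are taken from the comment above; the remaining keys and values are assumptions carried over from the 2.13 schema and nova's API samples, shown purely for illustration.

# Illustrative 2.64-style server group body (not part of this change):
example_server_group = {
    'id': '5bbcc3c4-1da2-4437-a48a-66f15b1b13f9',
    'name': 'anti-affinity-group',
    'policy': 'anti-affinity',               # replaces the old 'policies' list
    'rules': {'max_server_per_host': 3},     # new in 2.64
    'members': [],
    'project_id': '6f70656e737461636b20342065766572',  # assumed, as in 2.13
    'user_id': 'fake',                                  # assumed, as in 2.13
}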
diff --git a/tempest/lib/api_schema/response/compute/v2_70/servers.py b/tempest/lib/api_schema/response/compute/v2_70/servers.py
index 6103923..6bb688a 100644
--- a/tempest/lib/api_schema/response/compute/v2_70/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_70/servers.py
@@ -79,3 +79,4 @@
 update_tag = copy.deepcopy(servers263.update_tag)
 delete_tag = copy.deepcopy(servers263.delete_tag)
 show_instance_action = copy.deepcopy(servers263.show_instance_action)
+create_backup = copy.deepcopy(servers263.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
index 3e55c1c..b1c202b 100644
--- a/tempest/lib/api_schema/response/compute/v2_71/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -83,3 +83,4 @@
 show_volume_attachment = copy.deepcopy(servers270.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers270.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers270.show_instance_action)
+create_backup = copy.deepcopy(servers270.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
index e7a1d87..89f100d 100644
--- a/tempest/lib/api_schema/response/compute/v2_73/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -80,3 +80,4 @@
 show_volume_attachment = copy.deepcopy(servers271.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers271.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers271.show_instance_action)
+create_backup = copy.deepcopy(servers271.create_backup)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/compute/v2_75/__init__.py
similarity index 100%
copy from tempest/services/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_75/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_75/servers.py b/tempest/lib/api_schema/response/compute/v2_75/servers.py
new file mode 100644
index 0000000..6b3e93d
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_75/servers.py
@@ -0,0 +1,64 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+from tempest.lib.api_schema.response.compute.v2_73 import servers as servers273
+
+
+###########################################################################
+#
+# 2.75:
+#
+# The server representation is made consistent among the GET, PUT
+# and rebuild server API responses.
+#
+###########################################################################
+
+rebuild_server = copy.deepcopy(servers273.get_server)
+rebuild_server['response_body']['properties']['server'][
+    'properties'].pop('OS-EXT-SRV-ATTR:user_data')
+rebuild_server['status_code'] = [202]
+rebuild_server['response_body']['properties']['server'][
+    'properties'].update({'user_data': servers257.user_data})
+rebuild_server['response_body']['properties']['server'][
+    'required'].append('user_data')
+
+rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'properties'].update({'adminPass': {'type': 'string'}})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'required'].append('adminPass')
+
+update_server = copy.deepcopy(servers273.get_server)
+
+# NOTE(gmann): Below are the unchanged schema in this microversion. We
+# need to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.73 ***
+get_server = copy.deepcopy(servers273.get_server)
+list_servers = copy.deepcopy(servers273.list_servers)
+list_servers_detail = copy.deepcopy(servers273.list_servers_detail)
+show_server_diagnostics = copy.deepcopy(servers273.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers273.get_remote_consoles)
+list_tags = copy.deepcopy(servers273.list_tags)
+update_all_tags = copy.deepcopy(servers273.update_all_tags)
+delete_all_tags = copy.deepcopy(servers273.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers273.check_tag_existence)
+update_tag = copy.deepcopy(servers273.update_tag)
+delete_tag = copy.deepcopy(servers273.delete_tag)
+attach_volume = copy.deepcopy(servers273.attach_volume)
+show_volume_attachment = copy.deepcopy(servers273.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers273.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers273.show_instance_action)
+create_backup = copy.deepcopy(servers273.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_79/servers.py b/tempest/lib/api_schema/response/compute/v2_79/servers.py
index b5507f9..77d9beb 100644
--- a/tempest/lib/api_schema/response/compute/v2_79/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_79/servers.py
@@ -12,7 +12,7 @@
 
 import copy
 
-from tempest.lib.api_schema.response.compute.v2_73 import servers as servers273
+from tempest.lib.api_schema.response.compute.v2_75 import servers as servers275
 
 
 ###########################################################################
@@ -27,19 +27,19 @@
 # - POST /servers/{server_id}/os-volume_attachments
 ###########################################################################
 
-attach_volume = copy.deepcopy(servers273.attach_volume)
+attach_volume = copy.deepcopy(servers275.attach_volume)
 attach_volume['response_body']['properties']['volumeAttachment'][
     'properties'].update({'delete_on_termination': {'type': 'boolean'}})
 attach_volume['response_body']['properties']['volumeAttachment'][
     'required'].append('delete_on_termination')
 
-show_volume_attachment = copy.deepcopy(servers273.show_volume_attachment)
+show_volume_attachment = copy.deepcopy(servers275.show_volume_attachment)
 show_volume_attachment['response_body']['properties']['volumeAttachment'][
     'properties'].update({'delete_on_termination': {'type': 'boolean'}})
 show_volume_attachment['response_body']['properties'][
     'volumeAttachment']['required'].append('delete_on_termination')
 
-list_volume_attachments = copy.deepcopy(servers273.list_volume_attachments)
+list_volume_attachments = copy.deepcopy(servers275.list_volume_attachments)
 list_volume_attachments['response_body']['properties']['volumeAttachments'][
     'items']['properties'].update(
         {'delete_on_termination': {'type': 'boolean'}})
@@ -49,20 +49,21 @@
 # NOTE(zhufl): Below are the unchanged schema in this microversion. We
 # need to keep this schema in this file to have the generic way to select the
 # right schema based on self.schema_versions_info mapping in service client.
-# ****** Schemas unchanged since microversion 2.73 ***
-rebuild_server = copy.deepcopy(servers273.rebuild_server)
+# ****** Schemas unchanged since microversion 2.75 ***
+rebuild_server = copy.deepcopy(servers275.rebuild_server)
 rebuild_server_with_admin_pass = copy.deepcopy(
-    servers273.rebuild_server_with_admin_pass)
-update_server = copy.deepcopy(servers273.update_server)
-get_server = copy.deepcopy(servers273.get_server)
-list_servers_detail = copy.deepcopy(servers273.list_servers_detail)
-list_servers = copy.deepcopy(servers273.list_servers)
-show_server_diagnostics = copy.deepcopy(servers273.show_server_diagnostics)
-get_remote_consoles = copy.deepcopy(servers273.get_remote_consoles)
-list_tags = copy.deepcopy(servers273.list_tags)
-update_all_tags = copy.deepcopy(servers273.update_all_tags)
-delete_all_tags = copy.deepcopy(servers273.delete_all_tags)
-check_tag_existence = copy.deepcopy(servers273.check_tag_existence)
-update_tag = copy.deepcopy(servers273.update_tag)
-delete_tag = copy.deepcopy(servers273.delete_tag)
-show_instance_action = copy.deepcopy(servers273.show_instance_action)
+    servers275.rebuild_server_with_admin_pass)
+update_server = copy.deepcopy(servers275.update_server)
+get_server = copy.deepcopy(servers275.get_server)
+list_servers_detail = copy.deepcopy(servers275.list_servers_detail)
+list_servers = copy.deepcopy(servers275.list_servers)
+show_server_diagnostics = copy.deepcopy(servers275.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers275.get_remote_consoles)
+list_tags = copy.deepcopy(servers275.list_tags)
+update_all_tags = copy.deepcopy(servers275.update_all_tags)
+delete_all_tags = copy.deepcopy(servers275.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers275.check_tag_existence)
+update_tag = copy.deepcopy(servers275.update_tag)
+delete_tag = copy.deepcopy(servers275.delete_tag)
+show_instance_action = copy.deepcopy(servers275.show_instance_action)
+create_backup = copy.deepcopy(servers275.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_8/servers.py b/tempest/lib/api_schema/response/compute/v2_8/servers.py
index 119d8e2..366fb1b 100644
--- a/tempest/lib/api_schema/response/compute/v2_8/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_8/servers.py
@@ -39,3 +39,4 @@
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers.show_instance_action)
+create_backup = copy.deepcopy(servers.create_backup)
diff --git a/tempest/lib/api_schema/response/compute/v2_9/servers.py b/tempest/lib/api_schema/response/compute/v2_9/servers.py
index 9258eec..b4c7865 100644
--- a/tempest/lib/api_schema/response/compute/v2_9/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_9/servers.py
@@ -58,3 +58,4 @@
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
 show_instance_action = copy.deepcopy(servers.show_instance_action)
+create_backup = copy.deepcopy(servers.create_backup)
diff --git a/tempest/lib/base.py b/tempest/lib/base.py
index 74ae77c..3be55c0 100644
--- a/tempest/lib/base.py
+++ b/tempest/lib/base.py
@@ -14,29 +14,11 @@
 #    under the License.
 
 import os
-import sys
 
 import fixtures
-import pkg_resources
 import testtools
 
 
-def _handle_skip_exception():
-    try:
-        stestr_version = pkg_resources.parse_version(
-            pkg_resources.get_distribution("stestr").version)
-        stestr_min = pkg_resources.parse_version('2.5.0')
-        new_stestr = (stestr_version >= stestr_min)
-        import unittest
-        import unittest2
-        if sys.version_info >= (3, 5) and new_stestr:
-            testtools.TestCase.skipException = unittest.case.SkipTest
-        else:
-            testtools.TestCase.skipException = unittest2.case.SkipTest
-    except Exception:
-        pass
-
-
 class BaseTestCase(testtools.testcase.WithAttributes, testtools.TestCase):
     setUpClassCalled = False
 
@@ -51,18 +33,6 @@
         if hasattr(super(BaseTestCase, cls), 'setUpClass'):
             super(BaseTestCase, cls).setUpClass()
         cls.setUpClassCalled = True
-        # TODO(gmann): cls.handle_skip_exception is really workaround for
-        # testtools bug- https://github.com/testing-cabal/testtools/issues/272
-        # stestr which is used by Tempest internally to run the test switch
-        # the customize test runner(which use stdlib unittest) for >=py3.5
-        # else testtools.run.- https://github.com/mtreinish/stestr/pull/265
-        # These two test runner are not compatible due to skip exception
-        # handling(due to unittest2). testtools.run treat unittestt.SkipTest
-        # as error and stdlib unittest treat unittest2.case.SkipTest raised
-        # by testtools.TestCase.skipException.
-        # The below workaround can be removed once testtools fix issue# 272.
-        cls.orig_skip_exception = testtools.TestCase.skipException
-        _handle_skip_exception()
 
     @classmethod
     def tearDownClass(cls):
@@ -70,7 +40,6 @@
             super(BaseTestCase, cls).tearDownClass()
 
     def setUp(self):
-        testtools.TestCase.skipException = self.orig_skip_exception
         super(BaseTestCase, self).setUp()
         if not self.setUpClassCalled:
             raise RuntimeError("setUpClass does not calls the super's "
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index ef14dfc..fc86914 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -414,6 +414,11 @@
                 return resp[i]
         return ""
 
+    def _get_global_request_id(self, resp):
+        if 'x-openstack-request-id' in resp:
+            return resp['x-openstack-request-id']
+        return ''
+
     def _safe_body(self, body, maxlen=4096):
         # convert a structure into a string safely
         try:
@@ -461,7 +466,10 @@
         if req_headers is None:
             req_headers = {}
         # if we have the request id, put it in the right part of the log
-        extra = dict(request_id=self._get_request_id(resp))
+        extra = {
+            'request_id': self._get_request_id(resp),
+            'global_request_id': self._get_global_request_id(resp),
+        }
         # NOTE(sdague): while we still have 6 callers to this function
         # we're going to just provide work around on who is actually
         # providing timings by gracefully adding no content if they don't.
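A minimal sketch of what the request logger's 'extra' dict now carries, with both IDs read from the response headers. The header names come from the change above; the sample response and the way request_id is filled in are simplified for illustration.

# Simplified: _get_request_id() actually probes more than one header name.
resp = {'x-compute-request-id': 'req-local-1111',
        'x-openstack-request-id': 'req-global-2222'}
extra = {
    'request_id': resp.get('x-compute-request-id', ''),
    'global_request_id': resp.get('x-openstack-request-id', ''),
}
# Both IDs can then be referenced from the logging format string when the
# logger is called with LOG.info(..., extra=extra).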
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index 224f3bf..d0cdc25 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -75,7 +75,7 @@
         :param ip_address: IP address to ssh to
         :param username: Ssh username
         :param password: Ssh password
-        :param pkey: Ssh public key
+        :param pkey: Ssh private key
         :param server: Server dict, used for debugging purposes
         :param servers_client: Servers client, used for debugging purposes
         :param ssh_timeout: Timeout in seconds to wait for the ssh banner
diff --git a/tempest/lib/services/compute/quota_classes_client.py b/tempest/lib/services/compute/quota_classes_client.py
index 9b64099..5f220a7 100644
--- a/tempest/lib/services/compute/quota_classes_client.py
+++ b/tempest/lib/services/compute/quota_classes_client.py
@@ -16,20 +16,30 @@
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.compute.v2_1\
-    import quota_classes as classes_schema
+    import quota_classes as schema
+from tempest.lib.api_schema.response.compute.v2_50 import quota_classes \
+    as schemav250
+from tempest.lib.api_schema.response.compute.v2_57 import quota_classes \
+    as schemav257
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
 
 class QuotaClassesClient(base_compute_client.BaseComputeClient):
 
+    schema_versions_info = [
+        {'min': None, 'max': '2.49', 'schema': schema},
+        {'min': '2.50', 'max': '2.56', 'schema': schemav250},
+        {'min': '2.57', 'max': None, 'schema': schemav257}]
+
     def show_quota_class_set(self, quota_class_id):
         """List the quota class set for a quota class."""
 
         url = 'os-quota-class-sets/%s' % quota_class_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.validate_response(classes_schema.get_quota_class_set, resp, body)
+        _schema = self.get_schema(self.schema_versions_info)
+        self.validate_response(_schema.get_quota_class_set, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_quota_class_set(self, quota_class_id, **kwargs):
@@ -45,6 +55,7 @@
                               post_body)
 
         body = json.loads(body)
-        self.validate_response(classes_schema.update_quota_class_set,
+        _schema = self.get_schema(self.schema_versions_info)
+        self.validate_response(_schema.update_quota_class_set,
                                resp, body)
         return rest_client.ResponseBody(resp, body)
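The schema_versions_info table above is consumed by BaseComputeClient.get_schema, which picks the module whose [min, max] range contains the negotiated compute microversion. The helper below is an illustrative sketch of that range matching only, not tempest code.

def _parse(microversion):
    # '2.50' -> (2, 50); None marks an open end of a range.
    if microversion is None:
        return None
    return tuple(int(part) for part in microversion.split('.'))


def pick_schema(schema_versions_info, microversion):
    version = _parse(microversion)
    for entry in schema_versions_info:
        low = _parse(entry['min']) or (0, 0)
        high = _parse(entry['max']) or (float('inf'),)
        if low <= version <= high:
            return entry['schema']
    raise ValueError('no schema covers microversion %s' % microversion)

# pick_schema(QuotaClassesClient.schema_versions_info, '2.50') would return
# the v2_50 quota_classes module in this sketch.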
diff --git a/tempest/lib/services/compute/server_groups_client.py b/tempest/lib/services/compute/server_groups_client.py
index 89ad2d9..9895653 100644
--- a/tempest/lib/services/compute/server_groups_client.py
+++ b/tempest/lib/services/compute/server_groups_client.py
@@ -20,6 +20,8 @@
     as schema
 from tempest.lib.api_schema.response.compute.v2_13 import server_groups \
     as schemav213
+from tempest.lib.api_schema.response.compute.v2_64 import server_groups \
+    as schemav264
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
@@ -28,7 +30,8 @@
 
     schema_versions_info = [
         {'min': None, 'max': '2.12', 'schema': schema},
-        {'min': '2.13', 'max': None, 'schema': schemav213}]
+        {'min': '2.13', 'max': '2.63', 'schema': schemav213},
+        {'min': '2.64', 'max': None, 'schema': schemav264}]
 
     def create_server_group(self, **kwargs):
         """Create the server group.
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index ed3d4c0..d2bdb6e 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -27,6 +27,7 @@
 from tempest.lib.api_schema.response.compute.v2_19 import servers as schemav219
 from tempest.lib.api_schema.response.compute.v2_26 import servers as schemav226
 from tempest.lib.api_schema.response.compute.v2_3 import servers as schemav23
+from tempest.lib.api_schema.response.compute.v2_45 import servers as schemav245
 from tempest.lib.api_schema.response.compute.v2_47 import servers as schemav247
 from tempest.lib.api_schema.response.compute.v2_48 import servers as schemav248
 from tempest.lib.api_schema.response.compute.v2_51 import servers as schemav251
@@ -39,6 +40,7 @@
 from tempest.lib.api_schema.response.compute.v2_70 import servers as schemav270
 from tempest.lib.api_schema.response.compute.v2_71 import servers as schemav271
 from tempest.lib.api_schema.response.compute.v2_73 import servers as schemav273
+from tempest.lib.api_schema.response.compute.v2_75 import servers as schemav275
 from tempest.lib.api_schema.response.compute.v2_79 import servers as schemav279
 from tempest.lib.api_schema.response.compute.v2_8 import servers as schemav28
 from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
@@ -57,7 +59,8 @@
         {'min': '2.9', 'max': '2.15', 'schema': schemav29},
         {'min': '2.16', 'max': '2.18', 'schema': schemav216},
         {'min': '2.19', 'max': '2.25', 'schema': schemav219},
-        {'min': '2.26', 'max': '2.46', 'schema': schemav226},
+        {'min': '2.26', 'max': '2.44', 'schema': schemav226},
+        {'min': '2.45', 'max': '2.46', 'schema': schemav245},
         {'min': '2.47', 'max': '2.47', 'schema': schemav247},
         {'min': '2.48', 'max': '2.50', 'schema': schemav248},
         {'min': '2.51', 'max': '2.53', 'schema': schemav251},
@@ -68,7 +71,8 @@
         {'min': '2.63', 'max': '2.69', 'schema': schemav263},
         {'min': '2.70', 'max': '2.70', 'schema': schemav270},
         {'min': '2.71', 'max': '2.72', 'schema': schemav271},
-        {'min': '2.73', 'max': '2.78', 'schema': schemav273},
+        {'min': '2.73', 'max': '2.74', 'schema': schemav273},
+        {'min': '2.75', 'max': '2.78', 'schema': schemav275},
         {'min': '2.79', 'max': None, 'schema': schemav279}]
 
     def __init__(self, auth_provider, service, region,
@@ -235,7 +239,9 @@
         API reference:
         https://docs.openstack.org/api-ref/compute/#create-server-back-up-createbackup-action
         """
-        return self.action(server_id, "createBackup", **kwargs)
+        schema = self.get_schema(self.schema_versions_info)
+        return self.action(server_id, "createBackup",
+                           schema.create_backup, **kwargs)
 
     def change_password(self, server_id, **kwargs):
         """Change the root password for the server.
diff --git a/tempest/lib/services/image/v2/namespaces_client.py b/tempest/lib/services/image/v2/namespaces_client.py
index c0fa74a..886ef00 100644
--- a/tempest/lib/services/image/v2/namespaces_client.py
+++ b/tempest/lib/services/image/v2/namespaces_client.py
@@ -16,6 +16,7 @@
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
+from tempest.lib import exceptions as lib_exc
 
 
 class NamespacesClient(rest_client.RestClient):
@@ -60,6 +61,13 @@
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
 
+    def is_resource_deleted(self, id):
+        try:
+            self.show_namespace(id)
+        except lib_exc.NotFound:
+            return True
+        return False
+
     def update_namespace(self, namespace, **kwargs):
         """Update a namespace.
 
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 7aa96b2..73ce08f 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -321,11 +321,12 @@
         return server
 
     def create_volume(self, size=None, name=None, snapshot_id=None,
-                      imageRef=None, volume_type=None, **kwargs):
+                      imageRef=None, volume_type=None, wait_until='available',
+                      **kwargs):
         """Creates volume
 
         This wrapper utility creates volume and waits for volume to be
-        in 'available' state.
+        in 'available' state by default. If wait_until is None, it does not wait.
         This method returns the volume's full representation by GET request.
         """
 
@@ -358,11 +359,12 @@
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.volumes_client.delete_volume, volume['id'])
         self.assertEqual(name, volume['name'])
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'available')
-        # The volume retrieved on creation has a non-up-to-date status.
-        # Retrieval after it becomes active ensures correct details.
-        volume = self.volumes_client.show_volume(volume['id'])['volume']
+        if wait_until:
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume['id'], wait_until)
+            # The volume retrieved on creation has a non-up-to-date status.
+            # Retrieval after it becomes active ensures correct details.
+            volume = self.volumes_client.show_volume(volume['id'])['volume']
         return volume
 
     def create_backup(self, volume_id, name=None, description=None,
@@ -417,8 +419,12 @@
 
         body = self.backups_client.restore_backup(backup_id, **kwargs)
         restore = body['restore']
-        self.addCleanup(self.volumes_client.delete_volume,
-                        restore['volume_id'])
+
+        using_pre_existing_volume = kwargs.get('volume_id', False)
+        if not using_pre_existing_volume:
+            self.addCleanup(self.volumes_client.delete_volume,
+                            restore['volume_id'])
+
         waiters.wait_for_volume_resource_status(self.backups_client,
                                                 backup_id, 'available')
         waiters.wait_for_volume_resource_status(self.volumes_client,
@@ -471,7 +477,8 @@
 
         self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                         snapshot['id'])
-        self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.snapshots_client.delete_snapshot, snapshot['id'])
         waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                 snapshot['id'], 'available')
         snapshot = self.snapshots_client.show_snapshot(
@@ -641,7 +648,8 @@
 
     def create_loginable_secgroup_rule(self, security_group_rules_client=None,
                                        secgroup=None,
-                                       security_groups_client=None):
+                                       security_groups_client=None,
+                                       rulesets=None):
         """Create loginable security group rule by neutron clients by default.
 
         This function will create:
@@ -655,24 +663,26 @@
             security_group_rules_client = self.security_group_rules_client
         if security_groups_client is None:
             security_groups_client = self.security_groups_client
+        if rulesets is None:
+            rulesets = [
+                dict(
+                    # ssh
+                    protocol='tcp',
+                    port_range_min=22,
+                    port_range_max=22,
+                ),
+                dict(
+                    # ping
+                    protocol='icmp',
+                ),
+                dict(
+                    # ipv6-icmp for ping6
+                    protocol='icmp',
+                    ethertype='IPv6',
+                )
+            ]
+
         rules = []
-        rulesets = [
-            dict(
-                # ssh
-                protocol='tcp',
-                port_range_min=22,
-                port_range_max=22,
-            ),
-            dict(
-                # ping
-                protocol='icmp',
-            ),
-            dict(
-                # ipv6-icmp for ping6
-                protocol='icmp',
-                ethertype='IPv6',
-            )
-        ]
         sec_group_rules_client = security_group_rules_client
         for ruleset in rulesets:
             for r_direction in ['ingress', 'egress']:
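Both manager changes above are opt-in and default to the previous behaviour. A hedged usage sketch from inside a scenario test; the keyword arguments come from the signatures above, while the security group helper name is an assumption.

# Create a volume without waiting for it to reach 'available'.
volume = self.create_volume(wait_until=None)

# Open only SSH instead of the default ssh/ping/ping6 rule set.
secgroup = self.create_security_group()  # assumed existing helper
self.create_loginable_secgroup_rule(
    secgroup=secgroup,
    rulesets=[dict(protocol='tcp', port_range_min=22, port_range_max=22)])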
diff --git a/tempest/scenario/test_compute_unified_limits.py b/tempest/scenario/test_compute_unified_limits.py
new file mode 100644
index 0000000..eda6d6f
--- /dev/null
+++ b/tempest/scenario/test_compute_unified_limits.py
@@ -0,0 +1,166 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+
+class ComputeProjectQuotaTest(manager.ScenarioTest):
+    """The test base class for compute unified limits tests.
+
+    Dynamic credentials (unique tenants) are created on a per-class basis, so
+    we test different quota limits in separate test classes to prevent a quota
+    limit update in one test class from affecting a test running in another
+    test class in parallel.
+
+    https://docs.openstack.org/tempest/latest/configuration.html#dynamic-credentials
+    """
+    credentials = ['primary', 'system_admin']
+    force_tenant_isolation = True
+
+    @classmethod
+    def skip_checks(cls):
+        super(ComputeProjectQuotaTest, cls).skip_checks()
+        if not CONF.compute_feature_enabled.unified_limits:
+            raise cls.skipException('Compute unified limits are not enabled.')
+
+    @classmethod
+    def resource_setup(cls):
+        super(ComputeProjectQuotaTest, cls).resource_setup()
+
+        # Figure out and record the nova service id
+        services = cls.os_system_admin.identity_services_v3_client.\
+            list_services()
+        nova_services = [x for x in services['services']
+                         if x['name'] == 'nova']
+        cls.nova_service_id = nova_services[0]['id']
+
+        # Pre-create quota limits in subclasses and record their IDs so we can
+        # update them in-place without needing to know which ones have been
+        # created and in which order.
+        cls.limit_ids = {}
+
+    @classmethod
+    def _create_limit(cls, name, value):
+        return cls.os_system_admin.identity_limits_client.create_limit(
+            CONF.identity.region, cls.nova_service_id,
+            cls.servers_client.tenant_id, name, value)['limits'][0]['id']
+
+    def _update_limit(self, name, value):
+        self.os_system_admin.identity_limits_client.update_limit(
+            self.limit_ids[name], value)
+
+
+class ServersQuotaTest(ComputeProjectQuotaTest):
+
+    @classmethod
+    def resource_setup(cls):
+        super(ServersQuotaTest, cls).resource_setup()
+
+        try:
+            cls.limit_ids['servers'] = cls._create_limit(
+                'servers', 5)
+            cls.limit_ids['class:VCPU'] = cls._create_limit(
+                'class:VCPU', 10)
+            cls.limit_ids['class:MEMORY_MB'] = cls._create_limit(
+                'class:MEMORY_MB', 25 * 1024)
+            cls.limit_ids['class:DISK_GB'] = cls._create_limit(
+                'class:DISK_GB', 10)
+        except lib_exc.Forbidden:
+            raise cls.skipException('Target system is not configured with '
+                                    'compute unified limits')
+
+    @decorators.idempotent_id('555d8bbf-d2ed-4e39-858c-4235899402d9')
+    @utils.services('compute')
+    def test_server_count_vcpu_memory_disk_quota(self):
+        # Set a quota on the number of servers for our tenant to one.
+        self._update_limit('servers', 1)
+
+        # Create one server.
+        first = self.create_server(name='first')
+
+        # Second server would put us over quota, so expect failure.
+        # NOTE: In nova, quota exceeded raises 403 Forbidden.
+        self.assertRaises(lib_exc.Forbidden,
+                          self.create_server,
+                          name='second')
+
+        # Update our limit to two.
+        self._update_limit('servers', 2)
+
+        # Now the same create should succeed.
+        second = self.create_server(name='second')
+
+        # Third server would put us over quota, so expect failure.
+        self.assertRaises(lib_exc.Forbidden,
+                          self.create_server,
+                          name='third')
+
+        # Delete the first server to put us under quota.
+        self.servers_client.delete_server(first['id'])
+        waiters.wait_for_server_termination(self.servers_client, first['id'])
+
+        # Now the same create should succeed.
+        third = self.create_server(name='third')
+
+        # Set the servers limit back to 10 to test other resources.
+        self._update_limit('servers', 10)
+
+        # Default flavor has: VCPU=1, MEMORY_MB=512, DISK_GB=1
+        # We are currently using 2 VCPU, set the limit to 2.
+        self._update_limit('class:VCPU', 2)
+
+        # Server create should fail as it would go over quota.
+        self.assertRaises(lib_exc.Forbidden,
+                          self.create_server,
+                          name='fourth')
+
+        # Delete the second server to put us under quota.
+        self.servers_client.delete_server(second['id'])
+        waiters.wait_for_server_termination(self.servers_client, second['id'])
+
+        # Same create should now succeed.
+        fourth = self.create_server(name='fourth')
+
+        # We are currently using 2 DISK_GB. Set limit to 1.
+        self._update_limit('class:DISK_GB', 1)
+
+        # Server create should fail because we're already over (new) quota.
+        self.assertRaises(lib_exc.Forbidden,
+                          self.create_server,
+                          name='fifth')
+
+        # Delete the third server.
+        self.servers_client.delete_server(third['id'])
+        waiters.wait_for_server_termination(self.servers_client, third['id'])
+
+        # Server create should fail again because it would still put us over
+        # quota.
+        self.assertRaises(lib_exc.Forbidden,
+                          self.create_server,
+                          name='fifth')
+
+        # Delete the fourth server.
+        self.servers_client.delete_server(fourth['id'])
+        waiters.wait_for_server_termination(self.servers_client, fourth['id'])
+
+        # Server create should succeed now.
+        self.create_server(name='fifth')
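These tests are gated on CONF.compute_feature_enabled.unified_limits and additionally skip when Keystone rejects limit creation with Forbidden. In tempest.conf terms the gate is expected to look like the snippet below; the section name follows tempest's usual group-to-section mapping and should be confirmed against the accompanying config option.

[compute-feature-enabled]
unified_limits = True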
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 5aac19c..8cafd1f 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.common import custom_matchers
 from tempest.common import utils
 from tempest.common import waiters
@@ -29,24 +31,11 @@
 
     """This is a basic minimum scenario test.
 
-    This test below:
+    These tests below:
     * across the multiple components
     * as a regular user
     * with and without optional parameters
     * check command outputs
-
-    Steps:
-    1. Create image
-    2. Create keypair
-    3. Boot instance with keypair and get list of instances
-    4. Create volume and show list of volumes
-    5. Attach volume to instance and getlist of volumes
-    6. Add IP to instance
-    7. Create and add security group to instance
-    8. Check SSH connection to instance
-    9. Reboot instance
-    10. Check SSH connection to instance after reboot
-
     """
 
     def nova_show(self, server):
@@ -67,8 +56,9 @@
             volume, custom_matchers.MatchesDictExceptForKeys(
                 got_volume, excluded_keys=excluded_keys))
 
-    def nova_reboot(self, server):
-        self.servers_client.reboot_server(server['id'], type='SOFT')
+    def nova_reboot(self, server, hard=False):
+        self.servers_client.reboot_server(server['id'],
+                                          type="HARD" if hard else "SOFT")
         waiters.wait_for_server_status(self.servers_client,
                                        server['id'], 'ACTIVE')
 
@@ -96,9 +86,37 @@
                    '%s' % (secgroup['id'], server['id']))
             raise exceptions.TimeoutException(msg)
 
+    def _get_floating_ip_in_server_addresses(self, floating_ip, server):
+        for addresses in server['addresses'].values():
+            for address in addresses:
+                if (address['OS-EXT-IPS:type'] == 'floating' and
+                        address['addr'] == floating_ip['floating_ip_address']):
+                    return address
+
+    def _is_floating_ip_detached_from_server(self, server, floating_ip):
+        server_info = self.servers_client.show_server(
+            server['id'])['server']
+        address = self._get_floating_ip_in_server_addresses(
+            floating_ip, server_info)
+        return (not address)
+
     @decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
     @utils.services('compute', 'volume', 'image', 'network')
     def test_minimum_basic_scenario(self):
+        """This is a basic minimum scenario with multiple components
+
+        Steps:
+        1. Create image
+        2. Create keypair
+        3. Boot instance with keypair and get list of instances
+        4. Create volume and show list of volumes
+        5. Attach volume to instance and get list of volumes
+        6. Add IP to instance
+        7. Create and add security group to instance
+        8. Check SSH connection to instance
+        9. Reboot instance
+        10. Check SSH connection to instance after reboot
+        """
         image = self.image_create()
         keypair = self.create_keypair()
 
@@ -121,7 +139,7 @@
         floating_ip = None
         server = self.servers_client.show_server(server['id'])['server']
         if (CONF.network_feature_enabled.floating_ips and
-            CONF.network.floating_network_name):
+                CONF.network.floating_network_name):
             fip = self.create_floating_ip(server)
             floating_ip = self.associate_floating_ip(
                 fip, server)
@@ -154,3 +172,116 @@
             waiters.wait_for_server_floating_ip(
                 self.servers_client, server, floating_ip,
                 wait_for_disassociate=True)
+
+            if not test_utils.call_until_true(
+                    self._is_floating_ip_detached_from_server,
+                    CONF.compute.build_timeout,
+                    CONF.compute.build_interval, server, floating_ip):
+                msg = ("Floating IP '%s' should not be in server addresses: %s"
+                       % (floating_ip['floating_ip_address'],
+                          server['addresses']))
+                raise exceptions.TimeoutException(msg)
+
+    @decorators.idempotent_id('a8fd48ec-1d01-4895-b932-02321661ec1e')
+    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+                          "Cinder volume snapshots are disabled")
+    @utils.services('compute', 'volume', 'image', 'network')
+    def test_minimum_basic_instance_hard_reboot_after_vol_snap_deletion(self):
+        """Test compute hard reboot after volume snapshot deleted
+
+        Steps:
+        1. Create image
+        2. Create keypair
+        3. Boot instance with keypair and get list of instances
+        4. Create volume and show list of volumes
+        5. Attach volume to instance and get list of volumes
+        6. Create a snapshot from volume
+        7. Add IP to instance
+        8. Create and add security group to instance
+        9. Check SSH connection to instance
+        10. Write data timestamp to the attached volume
+        11. Delete the volume snapshot before rebooting the instance
+        12. Reboot instance (HARD)
+        13. Check SSH connection to instance after reboot
+        14. Verify attached disk data timestamp post instance reboot
+        """
+        image = self.image_create()
+        keypair = self.create_keypair()
+
+        server = self.create_server(image_id=image, key_name=keypair['name'])
+        servers = self.servers_client.list_servers()['servers']
+        self.assertIn(server['id'], [x['id'] for x in servers])
+
+        self.nova_show(server)
+
+        volume = self.create_volume()
+        volumes = self.volumes_client.list_volumes()['volumes']
+        self.assertIn(volume['id'], [x['id'] for x in volumes])
+
+        self.cinder_show(volume)
+
+        volume = self.nova_volume_attach(server, volume)
+        self.addCleanup(self.nova_volume_detach, server, volume)
+        snapshot = self.create_volume_snapshot(volume['id'], force=True)
+        self.cinder_show(volume)
+
+        floating_ip = None
+        server = self.servers_client.show_server(server['id'])['server']
+        if (CONF.network_feature_enabled.floating_ips and
+                CONF.network.floating_network_name):
+            fip = self.create_floating_ip(server)
+            floating_ip = self.associate_floating_ip(
+                fip, server)
+            # fetch the server again to make sure the addresses were refreshed
+            # after associating the floating IP
+            server = self.servers_client.show_server(server['id'])['server']
+            address = self._get_floating_ip_in_server_addresses(
+                floating_ip, server)
+            self.assertIsNotNone(
+                address,
+                "Failed to find floating IP '%s' in server addresses: %s" %
+                (floating_ip['floating_ip_address'], server['addresses']))
+            ssh_ip = floating_ip['floating_ip_address']
+        else:
+            ssh_ip = self.get_server_ip(server)
+
+        self.create_and_add_security_group_to_server(server)
+
+        # check that we can SSH to the server before reboot
+        self.linux_client = self.get_remote_client(
+            ssh_ip, private_key=keypair['private_key'],
+            server=server)
+
+        # write data to the volume before reboot instance
+        timestamp_before = self.create_timestamp(
+            ssh_ip, private_key=keypair['private_key'], server=server)
+        # delete the snapshot before rebooting the instance
+        self.snapshots_client.delete_snapshot(snapshot['id'])
+        self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
+        self.nova_reboot(server, hard=True)
+
+        # check that we can SSH to the server after reboot
+        # (both connections are part of the scenario)
+        self.linux_client = self.get_remote_client(
+            ssh_ip, private_key=keypair['private_key'],
+            server=server)
+
+        self.check_disks()
+        timestamp_after = self.get_timestamp(
+            ssh_ip, private_key=keypair['private_key'], server=server)
+        self.assertEqual(timestamp_before, timestamp_after)
+        if floating_ip:
+            # delete the floating IP, this should refresh the server addresses
+            self.disassociate_floating_ip(floating_ip)
+            waiters.wait_for_server_floating_ip(
+                self.servers_client, server, floating_ip,
+                wait_for_disassociate=True)
+
+            if not test_utils.call_until_true(
+                self._is_floating_ip_detached_from_server,
+                    CONF.compute.build_timeout, CONF.compute.build_interval,
+                    server, floating_ip):
+                msg = ("Floating IP '%s' should not be in server addresses: %s"
+                       % (floating_ip['floating_ip_address'],
+                          server['addresses']))
+                raise exceptions.TimeoutException(msg)
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index b48ac3c..1c00212 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -15,7 +15,9 @@
 
 import testtools
 
+from oslo_log import log
 from tempest.common import utils
+from tempest.common.utils import net_downtime
 from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
@@ -23,6 +25,8 @@
 
 CONF = config.CONF
 
+LOG = log.getLogger(__name__)
+
 
 class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
     """Check VM connectivity after some advanced instance operations executed:
@@ -252,6 +256,11 @@
         block_migration = (CONF.compute_feature_enabled.
                            block_migration_for_live_migration)
         old_host = self.get_host_for_server(server['id'])
+
+        downtime_meter = net_downtime.NetDowntimeMeter(
+            floating_ip['floating_ip_address'])
+        self.useFixture(downtime_meter)
+
         self.admin_servers_client.live_migrate_server(
             server['id'], host=None, block_migration=block_migration,
             disk_over_commit=False)
@@ -261,6 +270,16 @@
         new_host = self.get_host_for_server(server['id'])
         self.assertNotEqual(old_host, new_host, 'Server did not migrate')
 
+        downtime = downtime_meter.get_downtime()
+        self.assertIsNotNone(downtime)
+        LOG.debug("Downtime seconds measured with downtime_meter = %r",
+                  downtime)
+        allowed_downtime = CONF.validation.allowed_network_downtime
+        self.assertLess(
+            downtime, allowed_downtime,
+            "Downtime of {} seconds is higher than expected '{}'".format(
+                downtime, allowed_downtime))
+
         self._wait_server_status_and_check_network_connectivity(
             server, keypair, floating_ip)
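NetDowntimeMeter lives in tempest.common.utils.net_downtime; only its constructor and get_downtime() are visible in this change. The fixture below is a conceptual stand-in written under those assumptions, pinging the floating IP in the background and reporting missed replies as downtime seconds; it is not the real implementation.

import subprocess
import threading
import time

import fixtures


class SimpleDowntimeMeter(fixtures.Fixture):
    """Illustrative downtime meter: one ping per interval, missed = down."""

    def __init__(self, ip, interval=1.0):
        super().__init__()
        self.ip = ip
        self.interval = interval
        self._missed = 0
        self._stop = threading.Event()

    def _setUp(self):
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()
        self.addCleanup(self._finish)

    def _finish(self):
        self._stop.set()
        self._thread.join()

    def _run(self):
        while not self._stop.is_set():
            rc = subprocess.call(
                ['ping', '-c', '1', '-W', '1', self.ip],
                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            if rc != 0:
                self._missed += 1
            time.sleep(self.interval)

    def get_downtime(self):
        return self._missed * self.interval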
 
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
index adb0ee3..365eb1b 100644
--- a/tempest/scenario/test_network_qos_placement.py
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -79,7 +79,7 @@
         new_flavor = self.flavors_client.create_flavor(**{
             'ram': old_flavor['ram'],
             'vcpus': old_flavor['vcpus'],
-            'name': old_flavor['name'] + 'extra',
+            'name': old_flavor['name'] + 'extra-%s' % data_utils.rand_int_id(),
             'disk': old_flavor['disk'] + 1
         })['flavor']
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
diff --git a/tempest/services/orchestration/__init__.py b/tempest/services/orchestration/__init__.py
deleted file mode 100644
index 5a1ffcc..0000000
--- a/tempest/services/orchestration/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.services.orchestration.json.orchestration_client import \
-    OrchestrationClient
-
-__all__ = ['OrchestrationClient']
diff --git a/tempest/services/orchestration/json/__init__.py b/tempest/services/orchestration/json/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/services/orchestration/json/__init__.py
+++ /dev/null
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
deleted file mode 100644
index 0d7720e..0000000
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ /dev/null
@@ -1,413 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-import time
-from urllib import parse as urllib
-
-from oslo_serialization import jsonutils as json
-
-from tempest import exceptions
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class OrchestrationClient(rest_client.RestClient):
-
-    def list_stacks(self, params=None):
-        """Lists all stacks for a user."""
-
-        uri = 'stacks'
-        if params:
-            uri += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_stack(self, name, disable_rollback=True, parameters=None,
-                     timeout_mins=60, template=None, template_url=None,
-                     environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        headers, body = self._prepare_update_create(
-            name,
-            disable_rollback,
-            parameters,
-            timeout_mins,
-            template,
-            template_url,
-            environment,
-            files)
-        uri = 'stacks'
-        resp, body = self.post(uri, headers=headers, body=body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_stack(self, stack_identifier, name, disable_rollback=True,
-                     parameters=None, timeout_mins=60, template=None,
-                     template_url=None, environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        headers, body = self._prepare_update_create(
-            name,
-            disable_rollback,
-            parameters,
-            timeout_mins,
-            template,
-            template_url,
-            environment)
-
-        uri = "stacks/%s" % stack_identifier
-        resp, body = self.put(uri, headers=headers, body=body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def _prepare_update_create(self, name, disable_rollback=True,
-                               parameters=None, timeout_mins=60,
-                               template=None, template_url=None,
-                               environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            "stack_name": name,
-            "disable_rollback": disable_rollback,
-            "parameters": parameters,
-            "timeout_mins": timeout_mins,
-            "template": "HeatTemplateFormatVersion: '2012-12-12'\n",
-            "environment": environment,
-            "files": files
-        }
-        if template:
-            post_body['template'] = template
-        if template_url:
-            post_body['template_url'] = template_url
-        body = json.dumps(post_body)
-
-        # Password must be provided on stack create so that heat
-        # can perform future operations on behalf of the user
-        headers = self.get_headers()
-        headers['X-Auth-Key'] = self.password
-        headers['X-Auth-User'] = self.user
-        return headers, body
-
-    def show_stack(self, stack_identifier):
-        """Returns the details of a single stack."""
-        url = "stacks/%s" % stack_identifier
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def suspend_stack(self, stack_identifier):
-        """Suspend a stack."""
-        url = 'stacks/%s/actions' % stack_identifier
-        body = {'suspend': None}
-        resp, body = self.post(url, json.dumps(body))
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def resume_stack(self, stack_identifier):
-        """Resume a stack."""
-        url = 'stacks/%s/actions' % stack_identifier
-        body = {'resume': None}
-        resp, body = self.post(url, json.dumps(body))
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def list_resources(self, stack_identifier):
-        """Returns the details of a single resource."""
-        url = "stacks/%s/resources" % stack_identifier
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_resource(self, stack_identifier, resource_name):
-        """Returns the details of a single resource."""
-        url = "stacks/%s/resources/%s" % (stack_identifier, resource_name)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_stack(self, stack_identifier):
-        """Deletes the specified Stack."""
-        resp, _ = self.delete("stacks/%s" % str(stack_identifier))
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def wait_for_stack_status(self, stack_identifier, status,
-                              failure_pattern='^.*_FAILED$'):
-        """Waits for a Stack to reach a given status."""
-        start = int(time.time())
-        fail_regexp = re.compile(failure_pattern)
-
-        while True:
-            try:
-                body = self.show_stack(stack_identifier)['stack']
-            except lib_exc.NotFound:
-                if status == 'DELETE_COMPLETE':
-                    return
-            stack_name = body['stack_name']
-            stack_status = body['stack_status']
-            if stack_status == status:
-                return body
-            if fail_regexp.search(stack_status):
-                raise exceptions.StackBuildErrorException(
-                    stack_identifier=stack_identifier,
-                    stack_status=stack_status,
-                    stack_status_reason=body['stack_status_reason'])
-
-            if int(time.time()) - start >= self.build_timeout:
-                message = ('Stack %s failed to reach %s status (current: %s) '
-                           'within the required time (%s s).' %
-                           (stack_name, status, stack_status,
-                            self.build_timeout))
-                raise lib_exc.TimeoutException(message)
-            time.sleep(self.build_interval)
-
-    def show_resource_metadata(self, stack_identifier, resource_name):
-        """Returns the resource's metadata."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}'
-               '/metadata'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_events(self, stack_identifier):
-        """Returns list of all events for a stack."""
-        url = 'stacks/{stack_identifier}/events'.format(**locals())
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_resource_events(self, stack_identifier, resource_name):
-        """Returns list of all events for a resource from stack."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}'
-               '/events'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_event(self, stack_identifier, resource_name, event_id):
-        """Returns the details of a single stack's event."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
-               '/{event_id}'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_template(self, stack_identifier):
-        """Returns the template for the stack."""
-        url = ('stacks/{stack_identifier}/template'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def _validate_template(self, post_body):
-        """Returns the validation request result."""
-        post_body = json.dumps(post_body)
-        resp, body = self.post('validate', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def validate_template(self, template, parameters=None):
-        """Returns the validation result for a template with parameters."""
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            'template': template,
-            'parameters': parameters,
-        }
-        return self._validate_template(post_body)
-
-    def validate_template_url(self, template_url, parameters=None):
-        """Returns the validation result for a template with parameters."""
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            'template_url': template_url,
-            'parameters': parameters,
-        }
-        return self._validate_template(post_body)
-
-    def list_resource_types(self):
-        """List resource types."""
-        resp, body = self.get('resource_types')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_resource_type(self, resource_type_name):
-        """Return the schema of a resource type."""
-        url = 'resource_types/%s' % resource_type_name
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, json.loads(body))
-
-    def show_resource_type_template(self, resource_type_name):
-        """Return the template of a resource type."""
-        url = 'resource_types/%s/template' % resource_type_name
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, json.loads(body))
-
-    def create_software_config(self, name=None, config=None, group=None,
-                               inputs=None, outputs=None, options=None):
-        headers, body = self._prep_software_config_create(
-            name, config, group, inputs, outputs, options)
-
-        url = 'software_configs'
-        resp, body = self.post(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_config(self, conf_id):
-        """Returns a software configuration resource."""
-        url = 'software_configs/%s' % str(conf_id)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_software_config(self, conf_id):
-        """Deletes a specific software configuration."""
-        url = 'software_configs/%s' % str(conf_id)
-        resp, _ = self.delete(url)
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def create_software_deploy(self, server_id=None, config_id=None,
-                               action=None, status=None,
-                               input_values=None, output_values=None,
-                               status_reason=None, signal_transport=None):
-        """Creates or updates a software deployment."""
-        headers, body = self._prep_software_deploy_update(
-            None, server_id, config_id, action, status, input_values,
-            output_values, status_reason, signal_transport)
-
-        url = 'software_deployments'
-        resp, body = self.post(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_software_deploy(self, deploy_id=None, server_id=None,
-                               config_id=None, action=None, status=None,
-                               input_values=None, output_values=None,
-                               status_reason=None, signal_transport=None):
-        """Creates or updates a software deployment."""
-        headers, body = self._prep_software_deploy_update(
-            deploy_id, server_id, config_id, action, status, input_values,
-            output_values, status_reason, signal_transport)
-
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, body = self.put(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_software_deployments(self):
-        """Returns a list of all deployments."""
-        url = 'software_deployments'
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_deployment(self, deploy_id):
-        """Returns a specific software deployment."""
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_deployment_metadata(self, server_id):
-        """Return a config metadata for a specific server."""
-        url = 'software_deployments/metadata/%s' % server_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_software_deploy(self, deploy_id):
-        """Deletes a specific software deployment."""
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, _ = self.delete(url)
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def _prep_software_config_create(self, name=None, conf=None, group=None,
-                                     inputs=None, outputs=None, options=None):
-        """Prepares a software configuration body."""
-        post_body = {}
-        if name is not None:
-            post_body["name"] = name
-        if conf is not None:
-            post_body["config"] = conf
-        if group is not None:
-            post_body["group"] = group
-        if inputs is not None:
-            post_body["inputs"] = inputs
-        if outputs is not None:
-            post_body["outputs"] = outputs
-        if options is not None:
-            post_body["options"] = options
-        body = json.dumps(post_body)
-
-        headers = self.get_headers()
-        return headers, body
-
-    def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
-                                     config_id=None, action=None, status=None,
-                                     input_values=None, output_values=None,
-                                     status_reason=None,
-                                     signal_transport=None):
-        """Prepares a deployment create or update (if an id was given)."""
-        post_body = {}
-
-        if deploy_id is not None:
-            post_body["id"] = deploy_id
-        if server_id is not None:
-            post_body["server_id"] = server_id
-        if config_id is not None:
-            post_body["config_id"] = config_id
-        if action is not None:
-            post_body["action"] = action
-        if status is not None:
-            post_body["status"] = status
-        if input_values is not None:
-            post_body["input_values"] = input_values
-        if output_values is not None:
-            post_body["output_values"] = output_values
-        if status_reason is not None:
-            post_body["status_reason"] = status_reason
-        if signal_transport is not None:
-            post_body["signal_transport"] = signal_transport
-        body = json.dumps(post_body)
-
-        headers = self.get_headers()
-        return headers, body
diff --git a/tempest/test.py b/tempest/test.py
index bf0aba4..dba2695 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,7 +26,6 @@
 from tempest.common import credentials_factory as credentials
 from tempest.common import utils
 from tempest import config
-from tempest.lib import base as lib_base
 from tempest.lib.common import api_microversion_fixture
 from tempest.lib.common import fixed_network
 from tempest.lib.common import profiler
@@ -142,19 +141,6 @@
         # It should never be overridden by descendants
         if hasattr(super(BaseTestCase, cls), 'setUpClass'):
             super(BaseTestCase, cls).setUpClass()
-        # All the configuration checks that may generate a skip
-        # TODO(gmann): cls.handle_skip_exception is really workaround for
-        # testtools bug- https://github.com/testing-cabal/testtools/issues/272
-        # stestr which is used by Tempest internally to run the test switch
-        # the customize test runner(which use stdlib unittest) for >=py3.5
-        # else testtools.run.- https://github.com/mtreinish/stestr/pull/265
-        # These two test runner are not compatible due to skip exception
-        # handling(due to unittest2). testtools.run treat unittestt.SkipTest
-        # as error and stdlib unittest treat unittest2.case.SkipTest raised
-        # by testtools.TestCase.skipException.
-        # The below workaround can be removed once testtools fix issue# 272.
-        orig_skip_exception = testtools.TestCase.skipException
-        lib_base._handle_skip_exception()
         try:
             cls.skip_checks()
 
@@ -182,8 +168,6 @@
                 raise value.with_traceback(trace)
             finally:
                 del trace  # to avoid circular refs
-        finally:
-            testtools.TestCase.skipException = orig_skip_exception
 
     @classmethod
     def tearDownClass(cls):
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 5816ab1..a19f20b 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -13,15 +13,10 @@
 #    under the License.
 
 import os
-import sys
+import unittest
 
 from tempest.test_discover import plugins
 
-if sys.version_info >= (2, 7):
-    import unittest
-else:
-    import unittest2 as unittest
-
 
 def load_tests(loader, tests, pattern):
     ext_plugins = plugins.TempestTestPluginManager()
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index a8a4c0f..05ea84e 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -380,70 +380,6 @@
         self.assertIn('extensions', results['cinder'])
         self.assertEqual([], results['cinder']['extensions'])
 
-    def test_verify_extensions_nova(self):
-        def fake_list_extensions():
-            return ([{'alias': 'fake1'}, {'alias': 'fake2'},
-                     {'alias': 'not_fake'}])
-        fake_os = mock.MagicMock()
-        fake_client = mock.MagicMock()
-        fake_client.list_extensions = fake_list_extensions
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, 'get_extension_client',
-            return_value=fake_client))
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, 'get_enabled_extensions',
-            return_value=(['fake1', 'fake2', 'fake3'])))
-        results = verify_tempest_config.verify_extensions(fake_os,
-                                                          'nova', {})
-        self.assertIn('nova', results)
-        self.assertIn('fake1', results['nova'])
-        self.assertTrue(results['nova']['fake1'])
-        self.assertIn('fake2', results['nova'])
-        self.assertTrue(results['nova']['fake2'])
-        self.assertIn('fake3', results['nova'])
-        self.assertFalse(results['nova']['fake3'])
-        self.assertIn('not_fake', results['nova'])
-        self.assertFalse(results['nova']['not_fake'])
-
-    def test_verify_extensions_nova_all(self):
-        def fake_list_extensions():
-            return ({'extensions': [{'alias': 'fake1'},
-                                    {'alias': 'fake2'},
-                                    {'alias': 'not_fake'}]})
-        fake_os = mock.MagicMock()
-        fake_client = mock.MagicMock()
-        fake_client.list_extensions = fake_list_extensions
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, 'get_extension_client',
-            return_value=fake_client))
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, 'get_enabled_extensions',
-            return_value=(['all'])))
-        results = verify_tempest_config.verify_extensions(fake_os,
-                                                          'nova', {})
-        self.assertIn('nova', results)
-        self.assertIn('extensions', results['nova'])
-        self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
-                         sorted(results['nova']['extensions']))
-
-    def test_verify_extensions_nova_none(self):
-        def fake_list_extensions():
-            return ({'extensions': []})
-        fake_os = mock.MagicMock()
-        fake_client = mock.MagicMock()
-        fake_client.list_extensions = fake_list_extensions
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, 'get_extension_client',
-            return_value=fake_client))
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, 'get_enabled_extensions',
-            return_value=(['all'])))
-        results = verify_tempest_config.verify_extensions(fake_os,
-                                                          'nova', {})
-        self.assertIn('nova', results)
-        self.assertIn('extensions', results['nova'])
-        self.assertEqual([], results['nova']['extensions'])
-
     def test_verify_extensions_swift(self):
         def fake_list_extensions():
             return {'fake1': 'metadata',
@@ -513,7 +449,6 @@
     def test_get_extension_client(self):
         fake_os = mock.MagicMock()
         services = {
-            'nova': fake_os.compute.ExtensionsClient(),
             'neutron': fake_os.network.ExtensionsClient(),
             'swift': fake_os.object_storage.CapabilitiesClient(),
             'cinder': fake_os.volume_v2.ExtensionsClient(),
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index b76a263..1d0ee77 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -276,36 +276,6 @@
         )
         sleep.assert_called_once_with(client.build_interval)
 
-    def test_wait_for_guest_os_boot(self):
-        get_console_output = mock.Mock(
-            side_effect=[
-                {'output': 'os not ready yet\n'},
-                {'output': 'login:\n'}
-            ])
-        client = self.mock_client(get_console_output=get_console_output)
-        self.patch('time.time', return_value=0.)
-        sleep = self.patch('time.sleep')
-
-        with mock.patch.object(waiters.LOG, "info") as log_info:
-            waiters.wait_for_guest_os_boot(client, 'server_id')
-
-        get_console_output.assert_has_calls([
-            mock.call('server_id'), mock.call('server_id')])
-        sleep.assert_called_once_with(client.build_interval)
-        log_info.assert_not_called()
-
-    def test_wait_for_guest_os_boot_timeout(self):
-        get_console_output = mock.Mock(
-            return_value={'output': 'os not ready yet\n'})
-        client = self.mock_client(get_console_output=get_console_output)
-        self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
-        self.patch('time.sleep')
-
-        with mock.patch.object(waiters.LOG, "info") as log_info:
-            waiters.wait_for_guest_os_boot(client, 'server_id')
-
-        log_info.assert_called_once()
-
 
 class TestVolumeWaiters(base.TestCase):
     vol_migrating_src_host = {
@@ -553,6 +523,70 @@
         mock_list_volume_attachments.assert_called_once_with(
             mock.sentinel.server_id)
 
+    @mock.patch('os.system')
+    def test_wait_for_ping_host_alive(self, mock_ping):
+        mock_ping.return_value = 0
+        # Assert that nothing is raised as the host is alive
+        waiters.wait_for_ping('127.0.0.1', 10, 1)
+
+    @mock.patch('os.system')
+    def test_wait_for_ping_host_eventually_alive(self, mock_ping):
+        mock_ping.side_effect = [1, 1, 0]
+        # Assert that nothing is raised when the host is eventually alive
+        waiters.wait_for_ping('127.0.0.1', 10, 1)
+
+    @mock.patch('os.system')
+    def test_wait_for_ping_timeout(self, mock_ping):
+        mock_ping.return_value = 1
+        # Assert that TimeoutException is raised when the host is dead
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_ping,
+            '127.0.0.1',
+            .1,
+            .1
+        )
+
+    def test_wait_for_ssh(self):
+        mock_ssh_client = mock.Mock()
+        mock_ssh_client.validate_authentication.return_value = True
+        # Assert that nothing is raised when validate_authentication returns
+        waiters.wait_for_ssh(mock_ssh_client, .1)
+        mock_ssh_client.validate_authentication.assert_called_once()
+
+    def test_wait_for_ssh_eventually_up(self):
+        mock_ssh_client = mock.Mock()
+        timeout = lib_exc.SSHTimeout(
+            host='foo',
+            username='bar',
+            password='fizz'
+        )
+        mock_ssh_client.validate_authentication.side_effect = [
+            timeout,
+            timeout,
+            True
+        ]
+        # Assert that nothing is raised if validate_authentication passes
+        # before the timeout
+        waiters.wait_for_ssh(mock_ssh_client, 10)
+
+    def test_wait_for_ssh_timeout(self):
+        mock_ssh_client = mock.Mock()
+        timeout = lib_exc.SSHTimeout(
+            host='foo',
+            username='bar',
+            password='fizz'
+        )
+        mock_ssh_client.validate_authentication.side_effect = timeout
+        # Assert that TimeoutException is raised when validate_authentication
+        # doesn't pass in time.
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_ssh,
+            mock_ssh_client,
+            .1
+        )
+
 
 class TestServerFloatingIPWaiters(base.TestCase):
 
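The new ``wait_for_ping`` and ``wait_for_ssh`` test cases above drive the waiters purely through mocks of ``os.system`` and the SSH client, so it helps to keep the expected shape of the code under test in mind. A minimal sketch of a ``wait_for_ping``-style helper, assuming an ``(ip_address, timeout, interval)`` signature and ``os.system``-based probing as the tests imply (not the exact ``tempest.common.waiters`` implementation)::

    import os
    import time

    from tempest.lib import exceptions as lib_exc


    def wait_for_ping(ip_address, timeout, interval):
        """Poll a host with ping until it answers or the timeout expires."""
        start = time.time()
        while time.time() - start <= timeout:
            # os.system returns the ping exit status; 0 means the host replied.
            if os.system('ping -c1 -w1 %s > /dev/null 2>&1' % ip_address) == 0:
                return
            time.sleep(interval)
        raise lib_exc.TimeoutException(
            'Host %s did not reply to ping within %s seconds'
            % (ip_address, timeout))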
diff --git a/tempest/tests/lib/services/image/v2/test_schemas_client.py b/tempest/tests/lib/services/image/v2/test_schemas_client.py
index eef5b41..9fb249b 100644
--- a/tempest/tests/lib/services/image/v2/test_schemas_client.py
+++ b/tempest/tests/lib/services/image/v2/test_schemas_client.py
@@ -75,6 +75,293 @@
         }
     }
 
+    FAKE_SHOW_SCHEMA_IMAGE = {
+        "additionalProperties": {
+            "type": "string"
+        },
+        "links": [
+            {
+                "href": "{self}",
+                "rel": "self"
+            },
+            {
+                "href": "{file}",
+                "rel": "enclosure"
+            },
+            {
+                "href": "{schema}",
+                "rel": "describedby"
+            }
+        ],
+        "name": "image",
+        "properties": {
+            "architecture": {
+                "description": "Operating system architecture as "
+                               "specified in https://docs.openstack.org/"
+                               "python-glanceclient/latest/cli"
+                               "/property-keys.html",
+                "is_base": False,
+                "type": "string"
+            },
+            "checksum": {
+                "description": "md5 hash of image contents.",
+                "maxLength": 32,
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "container_format": {
+                "description": "Format of the container",
+                "enum": [
+                    None,
+                    "ami",
+                    "ari",
+                    "aki",
+                    "bare",
+                    "ovf",
+                    "ova",
+                    "docker"
+                ],
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "created_at": {
+                "description": "Date and time of image registration",
+                "readOnly": True,
+                "type": "string"
+            },
+            "direct_url": {
+                "description": "URL to access the image file "
+                               "kept in external store",
+                "readOnly": True,
+                "type": "string"
+            },
+            "disk_format": {
+                "description": "Format of the disk",
+                "enum": [
+                    None,
+                    "ami",
+                    "ari",
+                    "aki",
+                    "vhd",
+                    "vhdx",
+                    "vmdk",
+                    "raw",
+                    "qcow2",
+                    "vdi",
+                    "iso",
+                    "ploop"
+                ],
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "file": {
+                "description": "An image file url",
+                "readOnly": True,
+                "type": "string"
+            },
+            "id": {
+                "description": "An identifier for the image",
+                "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F])"
+                           "{4}-([0-9a-fA-F]){4}-([0-9a-fA-F])"
+                           "{4}-([0-9a-fA-F]){12}$",
+                "type": "string"
+            },
+            "instance_uuid": {
+                "description": "Metadata which can be used to record which"
+                               " instance this image is associated with. "
+                               "(Informational only, does not create "
+                               "an instance snapshot.)",
+                "is_base": False,
+                "type": "string"
+            },
+            "kernel_id": {
+                "description": "ID of image stored in Glance that should "
+                               "be used as the kernel when booting an "
+                               "AMI-style image.",
+                "is_base": False,
+                "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-"
+                           "([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-("
+                           "[0-9a-fA-F]){12}$",
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "locations": {
+                "description": "A set of URLs to access the image file "
+                               "kept in external store",
+                "items": {
+                    "properties": {
+                        "metadata": {
+                            "type": "object"
+                        },
+                        "url": {
+                            "maxLength": 255,
+                            "type": "string"
+                        }
+                    },
+                    "required": [
+                        "url",
+                        "metadata"
+                    ],
+                    "type": "object"
+                },
+                "type": "array"
+            },
+            "min_disk": {
+                "description": "Amount of disk space (in GB) "
+                               "required to boot image.",
+                "type": "integer"
+            },
+            "min_ram": {
+                "description": "Amount of ram (in MB) required "
+                               "to boot image.",
+                "type": "integer"
+            },
+            "name": {
+                "description": "Descriptive name for the image",
+                "maxLength": 255,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "os_distro": {
+                "description": "Common name of operating system distribution "
+                               "as specified in https://docs.openstack.org/"
+                               "python-glanceclient/latest/cli/"
+                               "property-keys.html",
+                "is_base": False,
+                "type": "string"
+            },
+            "os_hash_algo": {
+                "description": "Algorithm to calculate the os_hash_value",
+                "maxLength": 64,
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "os_hash_value": {
+                "description": "Hexdigest of the image contents "
+                               "using the algorithm specified by "
+                               "the os_hash_algo",
+                "maxLength": 128,
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "os_hidden": {
+                "description": "If true, image will not appear in default"
+                               " image list response.",
+                "type": "boolean"
+            },
+            "os_version": {
+                "description": "Operating system version as specified by "
+                               "the distributor",
+                "is_base": False,
+                "type": "string"
+            },
+            "owner": {
+                "description": "Owner of the image",
+                "maxLength": 255,
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "protected": {
+                "description": "If true, image will not be deletable.",
+                "type": "boolean"
+            },
+            "ramdisk_id": {
+                "description": "ID of image stored in Glance that should"
+                               " be used as the ramdisk when booting an "
+                               "AMI-style image.",
+                "is_base": False,
+                "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F])"
+                           "{4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
+                "type": [
+                    "null",
+                    "string"
+                ]
+            },
+            "schema": {
+                "description": "An image schema url",
+                "readOnly": True,
+                "type": "string"
+            },
+            "self": {
+                "description": "An image self url",
+                "readOnly": True,
+                "type": "string"
+            },
+            "size": {
+                "description": "Size of image file in bytes",
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "integer"
+                ]
+            },
+            "status": {
+                "description": "Status of the image",
+                "enum": [
+                    "queued",
+                    "saving",
+                    "active",
+                    "killed",
+                    "deleted",
+                    "pending_delete",
+                    "deactivated",
+                    "uploading",
+                    "importing"
+                ],
+                "readOnly": True,
+                "type": "string"
+            },
+            "tags": {
+                "description": "List of strings related to the image",
+                "items": {
+                    "maxLength": 255,
+                    "type": "string"
+                },
+                "type": "array"
+            },
+            "updated_at": {
+                "description": "Date and time of the last image modification",
+                "readOnly": True,
+                "type": "string"
+            },
+            "virtual_size": {
+                "description": "Virtual size of image in bytes",
+                "readOnly": True,
+                "type": [
+                    "null",
+                    "integer"
+                ]
+            },
+            "visibility": {
+                "description": "Scope of image accessibility",
+                "enum": [
+                    "public",
+                    "private"
+                ],
+                "type": "string"
+            }
+        }
+    }
+
     def setUp(self):
         super(TestSchemasClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -89,6 +376,22 @@
             bytes_body,
             schema="members")
 
+    def _test_show_schema_image(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_schema,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_SCHEMA_IMAGE,
+            bytes_body,
+            schema="image")
+
+    def _test_show_schema_images(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_schema,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_SCHEMA_IMAGE,
+            bytes_body,
+            schema="images")
+
     def _test_show_schema(self, bytes_body=False):
         self.check_service_client_function(
             self.client.show_schema,
@@ -103,6 +406,18 @@
     def test_show_schema_members_with_bytes_body(self):
         self._test_show_schema_members(bytes_body=True)
 
+    def test_show_schema_image_with_str_body(self):
+        self._test_show_schema_image()
+
+    def test_show_schema_image_with_bytes_body(self):
+        self._test_show_schema_image(bytes_body=True)
+
+    def test_show_schema_images_with_str_body(self):
+        self._test_show_schema_images()
+
+    def test_show_schema_images_with_bytes_body(self):
+        self._test_show_schema_images(bytes_body=True)
+
     def test_show_schema_with_str_body(self):
         self._test_show_schema()
 
diff --git a/tempest/tests/lib/test_base.py b/tempest/tests/lib/test_base.py
index 2c16e1c..de6021c 100644
--- a/tempest/tests/lib/test_base.py
+++ b/tempest/tests/lib/test_base.py
@@ -48,7 +48,7 @@
     @classmethod
     def setUpClass(cls):  # noqa
         """Simulate absence of super() call."""
-        cls.orig_skip_exception = cls.skipException
+        pass
 
     def setUp(self):
         try:
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 1889420..ede6d07 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -92,7 +92,7 @@
     def setUp(self):
         super(TestRequiresExtDecorator, self).setUp()
         cfg.CONF.set_default('api_extensions', ['enabled_ext', 'another_ext'],
-                             'compute-feature-enabled')
+                             'network-feature-enabled')
 
     def _test_requires_ext_helper(self, expected_to_skip=True,
                                   **decorator_args):
@@ -116,18 +116,18 @@
     def test_requires_ext_decorator(self):
         self._test_requires_ext_helper(expected_to_skip=False,
                                        extension='enabled_ext',
-                                       service='compute')
+                                       service='network')
 
     def test_requires_ext_decorator_disabled_ext(self):
         self._test_requires_ext_helper(extension='disabled_ext',
-                                       service='compute')
+                                       service='network')
 
     def test_requires_ext_decorator_with_all_ext_enabled(self):
         cfg.CONF.set_default('api_extensions', ['all'],
-                             group='compute-feature-enabled')
+                             group='network-feature-enabled')
         self._test_requires_ext_helper(expected_to_skip=False,
                                        extension='random_ext',
-                                       service='compute')
+                                       service='network')
 
     def test_requires_ext_decorator_bad_service(self):
         self.assertRaises(KeyError,
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 7c31185..464e66a 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -240,3 +240,9 @@
             with_other_decorators=True,
             with_negative_decorator=False,
             expected_success=False)
+
+    def test_no_log_warn(self):
+        self.assertFalse(list(checks.no_log_warn(
+            'LOG.warning("LOG.warn is deprecated")')))
+        self.assertTrue(list(checks.no_log_warn(
+            'LOG.warn("LOG.warn is deprecated")')))
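The ``test_no_log_warn`` case above exercises the new T118 rule registered as ``checks:no_log_warn`` in ``tox.ini`` further down. Hacking checks of this kind are typically small generators over a logical line that yield an offset/message pair when the deprecated call is found; a minimal sketch consistent with the test expectations (the real ``tempest/hacking/checks.py`` implementation may differ)::

    import re

    RE_LOG_WARN = re.compile(r"LOG\.warn\(")


    def no_log_warn(logical_line):
        """T118: LOG.warn is deprecated, enforce use of LOG.warning."""
        # Matches LOG.warn( but not LOG.warning(, so only the deprecated
        # call form is flagged.
        if RE_LOG_WARN.search(logical_line):
            yield (0, 'T118: Use LOG.warning instead of the deprecated '
                      'LOG.warn')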
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
index a95914a..cbb81e2 100644
--- a/tempest/tests/test_test.py
+++ b/tempest/tests/test_test.py
@@ -14,7 +14,7 @@
 #    under the License.
 
 import os
-import sys
+import unittest
 from unittest import mock
 
 from oslo_config import cfg
@@ -34,12 +34,6 @@
 from tempest.tests.lib.services import registry_fixture
 
 
-if sys.version_info >= (2, 7):
-    import unittest
-else:
-    import unittest2 as unittest
-
-
 class LoggingTestResult(testtools.TestResult):
 
     def __init__(self, log, *args, **kwargs):
diff --git a/tools/check_logs.py b/tools/check_logs.py
index 8ab3af2..8ea94e8 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -136,7 +136,7 @@
     with open(ALLOW_LIST_FILE) as stream:
         loaded = yaml.safe_load(stream)
         if loaded:
-            for (name, l) in loaded.values():
+            for (name, l) in loaded.items():
                 for w in l:
                     assert 'module' in w, 'no module in %s' % name
                     assert 'message' in w, 'no message in %s' % name
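The ``check_logs.py`` change above is a genuine bug fix: the loop unpacks ``(name, l)`` pairs, which ``dict.values()`` does not provide, so the old code could not iterate the allow list correctly. A small illustration, assuming the allow list maps a service name to a list of warning entries as the assertions suggest::

    # Hypothetical allow-list structure for illustration only.
    allow_list = {
        'nova': [{'module': 'nova.compute', 'message': 'ignore this warning'}],
    }

    # dict.items() yields (name, list-of-entries) pairs, matching the
    # "for (name, l) in loaded.items()" unpacking used in check_logs.py.
    for name, entries in allow_list.items():
        for entry in entries:
            assert 'module' in entry, 'no module in %s' % name
            assert 'message' in entry, 'no message in %s' % name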
diff --git a/tox.ini b/tox.ini
index 18f2aa6..94eb4d9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = pep8,py36,py39,bashate,pip-check-reqs
+envlist = pep8,py39,bashate,pip-check-reqs
 minversion = 3.18.0
 skipsdist = True
 ignore_basepython_conflict = True
@@ -369,6 +369,7 @@
   T115 = checks:dont_put_admin_tests_on_nonadmin_path
   T116 = checks:unsupported_exception_attribute_PY3
   T117 = checks:negative_test_attribute_always_applied_to_negative_tests
+  T118 = checks:no_log_warn
 paths =
   ./tempest/hacking
 
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index fad17dd..7535ccc 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -87,6 +87,19 @@
         horizon: true
 
 - job:
+    name: tempest-full-centos-9-stream
+    parent: tempest-full-py3
+    nodeset: devstack-single-node-centos-9-stream
+    # centos-9-stream is supported from the yoga release onwards
+    branches: ^(?!stable/(pike|queens|rocky|stein|train|ussuri|victoria|wallaby|xena)).*$
+    description: |
+      Base integration test on CentOS 9 stream
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
+
+- job:
     name: tempest-integrated-networking
     parent: devstack-tempest
     branches: ^(?!stable/ocata).*$
@@ -129,17 +142,16 @@
         c-bak: false
 
 - job:
-    name: tempest-integrated-compute-centos-8-stream
+    name: tempest-integrated-compute-centos-9-stream
     parent: tempest-integrated-compute
-    # TODO(gmann): Make this job non voting until bug#1957941 if fixed.
-    voting: false
-    nodeset: devstack-single-node-centos-8-stream
-    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria)).*$
+    nodeset: devstack-single-node-centos-9-stream
+    # centos-9-stream is supported from the yoga release onwards
+    branches: ^(?!stable/(pike|queens|rocky|stein|train|ussuri|victoria|wallaby|xena)).*$
     description: |
       This job runs integration tests for compute. This is
       subset of 'tempest-full-py3' job and run Nova, Neutron, Cinder (except backup tests)
       and Glance related tests. This is meant to be run on Nova gate only.
-      This version of the job also uses CentOS 8 stream.
+      This version of the job also uses CentOS 9 stream.
     vars:
       # Required until bug/1949606 is resolved when using libvirt and QEMU
       # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
@@ -335,13 +347,21 @@
     check:
       jobs:
         - grenade
+        - grenade-skip-level:
+            voting: false
         - tempest-integrated-networking
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-networking
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-compute
@@ -350,16 +370,34 @@
       (Nova, Neutron, Cinder and Glance related) in check and gate
       for the Nova integrated gate. This is meant to be
       run on Nova gate only.
+    # NOTE(gmann): This template is also used for stable branches, so when we
+    # add or remove jobs here we need to make sure we do not change the
+    # behaviour for stable branches. For example, with a branch variant we
+    # need to make sure the old job keeps running on stable branches and the
+    # new one runs only from master (or the branch it was meant to run on).
     check:
       jobs:
+        - grenade-skip-level:
+            voting: false
         - tempest-integrated-compute
-        - tempest-integrated-compute-centos-8-stream
-        - openstacksdk-functional-devstack
+        # centos-8-stream is tested on the wallaby through yoga branches
+        - tempest-integrated-compute-centos-8-stream:
+            branches: ^stable/(wallaby|xena|yoga).*$
+        # centos-9-stream is tested from the zed release onwards
+        - tempest-integrated-compute-centos-9-stream:
+            branches: ^(?!stable/(pike|queens|rocky|stein|train|ussuri|victoria|wallaby|xena|yoga)).*$
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - tempest-integrated-compute
-        - tempest-integrated-compute-centos-8-stream
-        - openstacksdk-functional-devstack
+        - tempest-integrated-compute-centos-9-stream
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-placement
@@ -371,13 +409,21 @@
     check:
       jobs:
         - grenade
+        - grenade-skip-level:
+            voting: false
         - tempest-integrated-placement
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-placement
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-storage
@@ -389,13 +435,21 @@
     check:
       jobs:
         - grenade
+        - grenade-skip-level:
+            voting: false
         - tempest-integrated-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-object-storage
@@ -408,9 +462,15 @@
       jobs:
         - grenade
         - tempest-integrated-object-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-object-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until the below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 731a72a..0b34ae0 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -8,10 +8,9 @@
     check:
       jobs:
         - openstack-tox-pep8
-        - openstack-tox-py36
-        - openstack-tox-py37
         - openstack-tox-py38
         - openstack-tox-py39
+        - openstack-tox-py310
         - tempest-full-parallel:
             # Define list of irrelevant files to use everywhere else
             irrelevant-files: &tempest-irrelevant-files
@@ -35,13 +34,13 @@
         - glance-multistore-cinder-import:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-yoga:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-xena:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-wallaby-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-victoria-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-ussuri-py3:
+        - tempest-slow-wallaby:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
@@ -102,6 +101,9 @@
             voting: false
             irrelevant-files: *tempest-irrelevant-files
         - devstack-plugin-ceph-tempest-py3:
+            # TODO(kopecmartin): make it voting once the below bug is fixed
+            # https://bugs.launchpad.net/devstack-plugin-ceph/+bug/1975648
+            voting: false
             irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
@@ -120,29 +122,31 @@
             irrelevant-files: *tempest-irrelevant-files
         - openstack-tox-bashate:
             irrelevant-files: *tempest-irrelevant-files-2
-        - tempest-full-py3-centos-8-stream:
+        - tempest-full-centos-9-stream:
+            # TODO(gmann): make it voting once the below fix is merged
+            # https://review.opendev.org/c/openstack/tempest/+/842140
+            voting: false
             irrelevant-files: *tempest-irrelevant-files
     gate:
       jobs:
         - openstack-tox-pep8
-        - openstack-tox-py36
-        - openstack-tox-py37
         - openstack-tox-py38
         - openstack-tox-py39
+        - openstack-tox-py310
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3-centos-8-stream:
-            irrelevant-files: *tempest-irrelevant-files
         - grenade:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *tempest-irrelevant-files-3
-        - devstack-plugin-ceph-tempest-py3:
-            irrelevant-files: *tempest-irrelevant-files
+        #- devstack-plugin-ceph-tempest-py3:
+        #    irrelevant-files: *tempest-irrelevant-files
+        #- tempest-full-centos-9-stream:
+        #    irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
         - tempest-with-latest-microversion
@@ -163,10 +167,12 @@
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-yoga
         - tempest-full-xena
         - tempest-full-wallaby-py3
-        - tempest-full-victoria-py3
-        - tempest-full-ussuri-py3
+        - tempest-slow-yoga
+        - tempest-slow-xena
+        - tempest-slow-wallaby
     periodic:
       jobs:
         - tempest-all
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 5cc0dd0..d1445c0 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,10 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-yoga
+    parent: tempest-full-py3
+    override-checkout: stable/yoga
+
+- job:
     name: tempest-full-xena
     parent: tempest-full-py3
     override-checkout: stable/xena
@@ -10,15 +15,19 @@
     override-checkout: stable/wallaby
 
 - job:
-    name: tempest-full-victoria-py3
-    parent: tempest-full-py3
-    override-checkout: stable/victoria
+    name: tempest-slow-yoga
+    parent: tempest-slow-py3
+    override-checkout: stable/yoga
 
 - job:
-    name: tempest-full-ussuri-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/ussuri
+    name: tempest-slow-xena
+    parent: tempest-slow-py3
+    override-checkout: stable/xena
+
+- job:
+    name: tempest-slow-wallaby
+    parent: tempest-slow-py3
+    override-checkout: stable/wallaby
 
 - job:
     name: tempest-full-py3
@@ -199,3 +208,41 @@
       - stable/victoria
       - stable/wallaby
       - stable/xena
+
+- job:
+    name: tempest-integrated-compute-centos-8-stream
+    parent: tempest-integrated-compute
+    # TODO(gmann): Keep this job non-voting until bug#1957941 is fixed.
+    voting: false
+    nodeset: devstack-single-node-centos-8-stream
+    branches:
+      - stable/wallaby
+      - stable/xena
+      - stable/yoga
+    description: |
+      This job runs integration tests for compute. This is a
+      subset of the 'tempest-full-py3' job and runs Nova, Neutron, Cinder
+      (except backup tests) and Glance related tests. It is meant to be run
+      on the Nova gate only.
+      This version of the job also uses CentOS 8 stream.
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
+
+- job:
+    name: tempest-full-py3-centos-8-stream
+    parent: tempest-full-py3
+    # TODO(gmann): Keep this job non-voting until bug#1957941 is fixed.
+    voting: false
+    branches:
+      - stable/wallaby
+      - stable/xena
+      - stable/yoga
+    nodeset: devstack-single-node-centos-8-stream
+    description: |
+      Base integration test with Neutron networking and py36 running
+      on CentOS 8 stream
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 5b6b702..822feaa 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -69,20 +69,6 @@
         c-bak: false
 
 - job:
-    name: tempest-full-py3-centos-8-stream
-    parent: tempest-full-py3
-    # TODO(gmann): Make this job non voting until bug#1957941 if fixed.
-    voting: false
-    nodeset: devstack-single-node-centos-8-stream
-    description: |
-      Base integration test with Neutron networking and py36 running
-      on CentOS 8 stream
-    vars:
-      # Required until bug/1949606 is resolved when using libvirt and QEMU
-      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
-      configure_swap_size: 4096
-
-- job:
     name: tempest-tox-plugin-sanity-check
     parent: tox
     description: |