Merge "Define default OS_TEST_TIMEOUT for every tempest tox env"
diff --git a/.gitignore b/.gitignore
index 9767e52..8b6222e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,7 +31,7 @@
 !.coveragerc
 cover/
 doc/source/_static/tempest.conf.sample
-doc/source/plugin-registry.rst
+doc/source/plugins/plugin-registry.rst
 
 # Files created by releasenotes build
 releasenotes/build
diff --git a/.zuul.yaml b/.zuul.yaml
deleted file mode 100644
index 70f582e..0000000
--- a/.zuul.yaml
+++ /dev/null
@@ -1,738 +0,0 @@
-- job:
-    name: devstack-tempest
-    parent: devstack
-    description: |
-      Base Tempest job.
-
-      This Tempest job provides the base for both the single and multi-node
-      test setup. To run a multi-node test inherit from devstack-tempest and
-      set the nodeset to a multi-node one.
-    required-projects: &base_required-projects
-      - opendev.org/openstack/tempest
-    timeout: 7200
-    roles: &base_roles
-      - zuul: opendev.org/openstack/devstack
-    vars: &base_vars
-      devstack_services:
-        tempest: true
-      devstack_local_conf:
-        test-config:
-          $TEMPEST_CONFIG:
-            compute:
-              min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
-      test_results_stage_name: test_results
-      zuul_copy_output:
-        '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
-        '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
-        '{{ devstack_base_dir }}/tempest/tempest.log': logs
-        '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
-        '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
-        '{{ stage_dir }}/stackviz': logs
-      extensions_to_txt:
-        conf: true
-        log: true
-        yaml: true
-        yml: true
-    run: playbooks/devstack-tempest.yaml
-    post-run: playbooks/post-tempest.yaml
-
-- job:
-    name: tempest-all
-    parent: devstack-tempest
-    description: |
-      Integration test that runs all tests.
-      Former name for this job was:
-        * legacy-periodic-tempest-dsvm-all-master
-    vars:
-      tox_envlist: all
-      tempest_test_regex: tempest
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-
-- job:
-    name: devstack-tempest-ipv6
-    parent: devstack-ipv6
-    description: |
-      Base Tempest IPv6 job. This job is derived from 'devstack-ipv6'
-      which set the IPv6-only setting for OpenStack services. As part of
-      run phase, this job will verify the IPv6 setting and check the services
-      endpoints and listen addresses are IPv6. Basically it will run the script
-      ./tool/verify-ipv6-only-deployments.sh
-
-      Child jobs of this job can run their own set of tests and can
-      add post-run playebooks to extend the IPv6 verification specific
-      to their deployed services.
-      Check the wiki page for more details about project jobs setup
-      - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
-    required-projects: *base_required-projects
-    timeout: 7200
-    roles: *base_roles
-    vars: *base_vars
-    run: playbooks/devstack-tempest-ipv6.yaml
-    post-run: playbooks/post-tempest.yaml
-
-- job:
-    name: tempest-ipv6-only
-    parent: devstack-tempest-ipv6
-    # This currently works from stable/pike on.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Integration test of IPv6-only deployments. This job runs
-      smoke and IPv6 relates tests only. Basic idea is to test
-      whether OpenStack Services listen on IPv6 addrress or not.
-    timeout: 10800
-    vars:
-      tox_envlist: ipv6-only
-
-- job:
-    name: tempest-full
-    parent: devstack-tempest
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking and py27.
-      Former names for this job where:
-        * legacy-tempest-dsvm-neutron-full
-        * gate-tempest-dsvm-neutron-full-ubuntu-xenial
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        USE_PYTHON3: False
-      devstack_services:
-        # NOTE(mriedem): Disable the cinder-backup service from tempest-full
-        # since tempest-full is in the integrated-gate project template but
-        # the backup tests do not really involve other services so they should
-        # be run in some more cinder-specific job, especially because the
-        # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
-        c-bak: false
-
-- job:
-    name: tempest-full-oslo-master
-    parent: tempest-full
-    description: |
-      Integration test using current git of oslo libs.
-      This ensures that when oslo libs get released that they
-      do not break OpenStack server projects.
-
-      Former name for this job was
-      periodic-tempest-dsvm-oslo-latest-full-master.
-    timeout: 10800
-    required-projects:
-      - opendev.org/openstack/oslo.cache
-      - opendev.org/openstack/oslo.concurrency
-      - opendev.org/openstack/oslo.config
-      - opendev.org/openstack/oslo.context
-      - opendev.org/openstack/oslo.db
-      - opendev.org/openstack/oslo.i18n
-      - opendev.org/openstack/oslo.log
-      - opendev.org/openstack/oslo.messaging
-      - opendev.org/openstack/oslo.middleware
-      - opendev.org/openstack/oslo.policy
-      - opendev.org/openstack/oslo.privsep
-      - opendev.org/openstack/oslo.reports
-      - opendev.org/openstack/oslo.rootwrap
-      - opendev.org/openstack/oslo.serialization
-      - opendev.org/openstack/oslo.service
-      - opendev.org/openstack/oslo.utils
-      - opendev.org/openstack/oslo.versionedobjects
-      - opendev.org/openstack/oslo.vmware
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: True
-
-- job:
-    name: tempest-full-parallel
-    parent: tempest-full
-    voting: false
-    branches:
-      - master
-    description: |
-      Base integration test with Neutron networking.
-      It includes all scenarios as it was in the past.
-      This job runs all scenario tests in parallel!
-    vars:
-      tox_envlist: full-parallel
-      devstack_localrc:
-        USE_PYTHON3: True
-
-- job:
-    name: tempest-full-py3
-    parent: devstack-tempest
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking and py3.
-      Former names for this job where:
-        * legacy-tempest-dsvm-py35
-        * gate-tempest-dsvm-py35
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        # NOTE(mriedem): Disable the cinder-backup service from
-        # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
-        # project template but the backup tests do not really involve other
-        # services so they should be run in some more cinder-specific job,
-        # especially because the tests fail at a high rate (see bugs 1483434,
-        # 1813217, 1745168)
-        c-bak: false
-
-- job:
-    name: tempest-integrated-networking
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This  job runs integration tests for networking. This is subset of
-      'tempest-full' job and run only Neutron and Nova related tests.
-      This is meant to be run on neutron gate only.
-    vars:
-      tox_envlist: integrated-network
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-compute
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for compute. This is
-      subset of 'tempest-full' job and run Nova, Neutron, Cinder (except backup tests)
-      and Glance related tests. This is meant to be run on Nova gate only.
-    vars:
-      tox_envlist: integrated-compute
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-placement
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for placement. This is
-      subset of 'tempest-full' job and run Nova and Neutron
-      related tests. This is meant to be run on Placement gate only.
-    vars:
-      tox_envlist: integrated-placement
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-storage
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for image & block storage. This is
-      subset of 'tempest-full' job and run Cinder, Glance, Swift and Nova
-      related tests. This is meant to be run on Cinder and Glance gate only.
-    vars:
-      tox_envlist: integrated-storage
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-
-- job:
-    name: tempest-integrated-object-storage
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for object storage. This is
-      subset of 'tempest-full' job and run Swift, Cinder and Glance
-      related tests. This is meant to be run on Swift gate only.
-    vars:
-      tox_envlist: integrated-object-storage
-      devstack_localrc:
-        # NOTE(gmann): swift is not ready on python3 yet and devstack
-        # install it on python2.7 only. But settting the USE_PYTHON3
-        # for future once swift is ready on py3.
-        USE_PYTHON3: true
-
-- job:
-    name: tempest-full-py3-ipv6
-    parent: devstack-tempest-ipv6
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking, IPv6 and py3.
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        c-bak: false
-
-- job:
-    name: tempest-multinode-full-base
-    parent: devstack-tempest
-    description: |
-      Base multinode integration test with Neutron networking and py27.
-      Former names for this job were:
-        * neutron-tempest-multinode-full
-        * legacy-tempest-dsvm-neutron-multinode-full
-        * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
-      This job includes two nodes, controller / tempest plus a subnode, but
-      it can be used with different topologies, as long as a controller node
-      and a tempest one exist.
-    timeout: 10800
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        FORCE_CONFIG_DRIVE: false
-        NOVA_ALLOW_MOVE_TO_SAME_HOST: false
-        LIVE_MIGRATION_AVAILABLE: true
-        USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-    group-vars:
-      peers:
-        devstack_localrc:
-          NOVA_ALLOW_MOVE_TO_SAME_HOST: false
-          LIVE_MIGRATION_AVAILABLE: true
-          USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-
-- job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-bionic
-    # This job runs on Bionic from stable/stein on.
-    branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
-
-- job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-xenial
-    # This job runs on Xenial and this is for stable/pike, stable/queens
-    # and stable/rocky. This job is prepared to make sure all stable branches
-    # before stable/stein will keep running on xenial. This job can be
-    # removed once stable/rocky is EOL.
-    branches:
-      - stable/pike
-      - stable/queens
-      - stable/rocky
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
-    name: tempest-multinode-full-py3
-    parent: tempest-multinode-full
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: true
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: true
-
-- job:
-    name: tempest-full-py3-opensuse15
-    parent: tempest-full-py3
-    nodeset: devstack-single-node-opensuse-15
-    description: |
-      Base integration test with Neutron networking and py36 running
-      on openSUSE Leap 15.x
-    voting: false
-
-- job:
-    name: tempest-slow
-    parent: tempest-multinode-full
-    description: |
-      This multinode integration job will run all the tests tagged as slow.
-      It enables the lvm multibackend setup to cover few scenario tests.
-      This job will run only slow tests (API or Scenario) serially.
-
-      Former names for this job were:
-        * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
-        * tempest-scenario-multinode-lvm-multibackend
-    timeout: 10800
-    vars:
-      tox_envlist: slow-serial
-      devstack_localrc:
-        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_plugins:
-        neutron: https://opendev.org/openstack/neutron
-      devstack_services:
-        neutron-placement: true
-        neutron-qos: true
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
-      tempest_concurrency: 2
-    group-vars:
-      # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
-      # the controller and subnode prior to Rocky so we have to make sure the
-      # variable is set in both locations.
-      subnode:
-        devstack_localrc:
-          ENABLE_VOLUME_MULTIATTACH: true
-
-- job:
-    name: tempest-slow-py3
-    parent: tempest-slow
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        c-bak: false
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: true
-
-- job:
-    name: tempest-full-train-py3
-    parent: tempest-full-py3
-    override-checkout: stable/train
-
-- job:
-    name: tempest-full-stein-py3
-    parent: tempest-full-py3
-    override-checkout: stable/stein
-
-- job:
-    name: tempest-full-rocky-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-xenial
-    override-checkout: stable/rocky
-
-- job:
-    name: tempest-tox-plugin-sanity-check
-    parent: tox
-    description: |
-      Run tempest plugin sanity check script using tox.
-    nodeset: ubuntu-bionic
-    vars:
-      tox_envlist: plugin-sanity-check
-    timeout: 5000
-
-- job:
-    name: tempest-cinder-v2-api
-    parent: devstack-tempest
-    branches:
-      - master
-    description: |
-      This job runs the cinder API test against v2 endpoint.
-    vars:
-      tox_envlist: all
-      tempest_test_regex: api.*volume
-      devstack_localrc:
-        TEMPEST_VOLUME_TYPE: volumev2
-
-- job:
-    name: tempest-full-test-account-py3
-    parent: tempest-full-py3
-    description: |
-      This job runs the full set of tempest tests using pre-provisioned
-      credentials instead of dynamic credentials and py3.
-      Former names for this job were:
-        - legacy-tempest-dsvm-full-test-accounts
-        - legacy-tempest-dsvm-neutron-full-test-accounts
-        - legacy-tempest-dsvm-identity-v3-test-accounts
-    vars:
-      devstack_localrc:
-        TEMPEST_USE_TEST_ACCOUNTS: True
-
-- job:
-    name: tempest-full-test-account-no-admin-py3
-    parent: tempest-full-test-account-py3
-    description: |
-      This job runs the full set of tempest tests using pre-provisioned
-      credentials and py3 without having an admin account.
-      Former name for this job was:
-        - legacy-tempest-dsvm-neutron-full-non-admin
-
-    vars:
-      devstack_localrc:
-        TEMPEST_HAS_ADMIN: False
-
-- job:
-    name: tempest-pg-full
-    parent: tempest-full
-    description: |
-      Base integration test with Neutron networking and PostgreSQL.
-      Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
-    vars:
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-        DATABASE_TYPE: postgresql
-        USE_PYTHON3: True
-
-- project-template:
-    name: integrated-gate-networking
-    description: |
-      Run the python3 Tempest network integration tests (Nova and Neutron related)
-      in check and gate for the neutron integrated gate. This is meant to be
-      run on neutron gate only.
-    check:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-networking
-    gate:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-networking
-
-- project-template:
-    name: integrated-gate-compute
-    description: |
-      Run the python3 Tempest compute integration tests
-      (Nova, Neutron, Cinder and Glance related) in check and gate
-      for the Nova integrated gate. This is meant to be
-      run on Nova gate only.
-    check:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-compute
-    gate:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-compute
-
-- project-template:
-    name: integrated-gate-placement
-    description: |
-      Run the python3 Tempest placement integration tests
-      (Nova and Neutron related) in check and gate
-      for the Placement integrated gate. This is meant to be
-      run on Placement gate only.
-    check:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-placement
-    gate:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-placement
-
-- project-template:
-    name: integrated-gate-storage
-    description: |
-      Run the python3 Tempest image & block storage integration tests
-      (Cinder, Glance, Swift and Nova related) in check and gate
-      for the neutron integrated gate. This is meant to be
-      run on Cinder and Glance gate only.
-    check:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-storage
-    gate:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-storage
-
-- project-template:
-    name: integrated-gate-object-storage
-    description: |
-      Run the python3 Tempest object storage integration tests
-      (Swift, Cinder and Glance related) in check and gate
-      for the swift integrated gate. This is meant to be
-      run on swift gate only.
-    check:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-object-storage
-    gate:
-      jobs:
-        - grenade-py3
-        - tempest-integrated-object-storage
-
-- project:
-    templates:
-      - check-requirements
-      - integrated-gate-py3
-      - openstack-cover-jobs
-      - openstack-python3-ussuri-jobs
-      - publish-openstack-docs-pti
-      - release-notes-jobs-python3
-    check:
-      jobs:
-        - devstack-tempest:
-            files:
-              - ^playbooks/
-              - ^roles/
-              - ^.zuul.yaml$
-        - devstack-tempest-ipv6:
-            voting: false
-            files:
-              - ^playbooks/
-              - ^roles/
-              - ^.zuul.yaml$
-        - tempest-full-parallel:
-            # Define list of irrelevant files to use everywhere else
-            irrelevant-files: &tempest-irrelevant-files
-              - ^.*\.rst$
-              - ^doc/.*$
-              - ^etc/.*$
-              - ^releasenotes/.*$
-              - ^setup.cfg$
-              - ^tempest/hacking/.*$
-              - ^tempest/tests/.*$
-              - ^tools/.*$
-              - ^.coveragerc$
-              - ^.gitignore$
-              - ^.gitreview$
-              - ^.mailmap$
-        - tempest-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3-ipv6:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-train-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-stein-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-rocky-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-multinode-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-tox-plugin-sanity-check:
-            irrelevant-files: &tempest-irrelevant-files-2
-              - ^.*\.rst$
-              - ^doc/.*$
-              - ^etc/.*$
-              - ^releasenotes/.*$
-              - ^setup.cfg$
-              - ^tempest/hacking/.*$
-              - ^tempest/tests/.*$
-              - ^.coveragerc$
-              - ^.gitignore$
-              - ^.gitreview$
-              - ^.mailmap$
-              # tools/ is not here since this relies on a script in tools/.
-        - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
-        - tempest-slow-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - nova-live-migration:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - devstack-plugin-ceph-tempest-py3:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
-            irrelevant-files: *tempest-irrelevant-files
-        - grenade-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario001-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario002-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario003-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario004-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr:
-            irrelevant-files: *tempest-irrelevant-files
-        - interop-tempest-consistency:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-test-account-py3:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-test-account-no-admin-py3:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - openstack-tox-bashate:
-            irrelevant-files: *tempest-irrelevant-files-2
-    gate:
-      jobs:
-        - tempest-slow-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - grenade-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
-    experimental:
-      jobs:
-        - tempest-cinder-v2-api:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-all:
-            irrelevant-files: *tempest-irrelevant-files
-        - legacy-tempest-dsvm-neutron-dvr-multinode-full:
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr-ha-multinode-full:
-            irrelevant-files: *tempest-irrelevant-files
-        - nova-tempest-v2-api:
-            irrelevant-files: *tempest-irrelevant-files
-        - legacy-tempest-dsvm-lvm-multibackend:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-pg-full:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3-opensuse15:
-            irrelevant-files: *tempest-irrelevant-files
-    periodic-stable:
-      jobs:
-        - tempest-full-train-py3
-        - tempest-full-stein-py3
-        - tempest-full-rocky-py3
-    periodic:
-      jobs:
-        - tempest-all
-        - tempest-full-oslo-master
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index a89ad94..2300763 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,17 +1,19 @@
-If you would like to contribute to the development of OpenStack, you must
-follow the steps in this page:
+The source repository for this project can be found at:
 
-   https://docs.openstack.org/infra/manual/developers.html
+   https://opendev.org/openstack/tempest
 
-If you already have a good understanding of how the system works and your
-OpenStack accounts are set up, you can skip to the development workflow
-section of this documentation to learn how changes to OpenStack should be
-submitted for review via the Gerrit tool:
+Pull requests submitted through GitHub are not monitored.
 
-   https://docs.openstack.org/infra/manual/developers.html#development-workflow
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
 
-Pull requests submitted through GitHub will be ignored.
+   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
 
-Bugs should be filed on Launchpad, not GitHub:
+Bugs should be filed on Launchpad:
 
    https://bugs.launchpad.net/tempest
+
+For more specific information about contributing to this repository, see the
+Tempest contributor guide:
+
+   https://docs.openstack.org/tempest/latest/contributor/contributing.html
diff --git a/HACKING.rst b/HACKING.rst
index 204b3c7..95bcbb5 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -12,7 +12,6 @@
   tempest/scenario tests
 - [T104] Scenario tests require a services decorator
 - [T105] Tests cannot use setUpClass/tearDownClass
-- [T106] vim configuration should not be kept in source files.
 - [T107] Check that a service tag isn't in the module path
 - [T108] Check no hyphen at the end of rand_name() argument
 - [T109] Cannot use testtools.skip decorator; instead use
@@ -60,7 +59,7 @@
 `relevant plugin projects`_.
 
 .. _External Plugin Interface: https://specs.openstack.org/openstack/qa-specs/specs/tempest/implemented/tempest-external-plugin-interface.html
-.. _relevant plugin projects: https://docs.openstack.org/tempest/latest/plugin-registry.html#detected-plugins
+.. _relevant plugin projects: https://docs.openstack.org/tempest/latest/plugins/plugin-registry.html#detected-plugins
 
 Exception Handling
 ------------------
diff --git a/REVIEWING.rst b/REVIEWING.rst
index e07e358..4c63aa0 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -160,13 +160,11 @@
 When to approve
 ---------------
 * It's OK to hold off on an approval until a subject matter expert reviews it.
-* Every patch needs two +2's before being approved.
-* However, a single Tempest core reviewer can approve patches without waiting
-  for another +2 in the following cases:
+* Every patch needs at least a single +2 before being approved. A single
+  Tempest core reviewer can approve patches on their own, but can always wait
+  for another +2 in any case. The following are cases where a single +2 can be
+  used without any issue:
 
-  * If a patch has already been approved but requires a trivial rebase to
-    merge, then there is no need to wait for a second +2, since the patch has
-    already had two +2's.
   * If any trivial patch set fixes one of the items below:
 
     * Documentation or code comment typo
@@ -187,7 +185,4 @@
     voting ``tempest-tox-plugin-sanity-check`` job) and unblock the
     tempest gate
 
-  Note that such a policy should be used judiciously, as we should strive to
-  have two +2's on each patch set, prior to approval.
-
 .. _example: https://review.opendev.org/#/c/611032/
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 9f38ada..30394e8 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,7 +1,7 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-openstackdocstheme>=1.20.0 # Apache-2.0
-reno>=2.5.0 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD
+openstackdocstheme>=2.2.0 # Apache-2.0
+reno>=3.1.0 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
 sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
index 7745594..5422af7 100644
--- a/doc/source/_extra/.htaccess
+++ b/doc/source/_extra/.htaccess
@@ -1 +1,4 @@
 redirectmatch 301 ^/developer/tempest/(.*) /tempest/latest/$1
+redirectmatch 301 ^/tempest/latest/plugin.html /tempest/latest/plugins/plugin.html
+redirectmatch 301 ^/tempest/latest/plugin-registry.html /tempest/latest/plugins/plugin-registry.html
+redirectmatch 301 ^/tempest/latest/#support-policy /tempest/latest/#stable-branch-support-policy
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 7ce431e..b738fde 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -24,6 +24,7 @@
 
 import os
 import subprocess
+import sys
 
 # Build the plugin registry
 def build_plugin_registry(app):
@@ -31,16 +32,20 @@
         os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
     subprocess.call(['tools/generate-tempest-plugins-list.sh'], cwd=root_dir)
 
+def autodoc_skip_member_handler(app, what, name, obj, skip, options):
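+    # When documenting class members, skip anything whose name does not
+    # start with "test", so only the test methods show up in the docs.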
+    return skip or (what == "class" and not name.startswith("test"))
+
 def setup(app):
+    app.connect('autodoc-skip-member', autodoc_skip_member_handler)
     if os.getenv('GENERATE_TEMPEST_PLUGIN_LIST', 'true').lower() == 'true':
         app.connect('builder-inited', build_plugin_registry)
 
-
-
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
+sys.path.insert(0, os.path.abspath('../../tempest'))
+sys.path.insert(0, os.path.abspath('../../tempest/api'))
 
 # -- General configuration -----------------------------------------------------
 
@@ -63,9 +68,10 @@
 todo_include_todos = True
 
 # openstackdocstheme options
-repository_name = 'openstack/tempest'
-bug_project = 'tempest'
-bug_tag = 'doc'
+openstackdocs_repo_name = 'openstack/tempest'
+openstackdocs_bug_project = 'tempest'
+openstackdocs_bug_tag = 'doc'
+openstackdocs_pdf_link = True
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -80,7 +86,7 @@
 master_doc = 'index'
 
 # General information about the project.
-copyright = u'2013, OpenStack QA Team'
+copyright = '2013, OpenStack QA Team'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -111,7 +117,7 @@
 show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
 
 # A list of ignored prefixes for module index sorting.
 modindex_common_prefix = ['tempest.']
@@ -200,9 +206,13 @@
 # (source start file, target name, title, author, documentclass
 # [howto/manual]).
 latex_documents = [
-    ('index', 'doc-tempest.tex', u'Tempest Testing Project',
-     u'OpenStack Foundation', 'manual'),
+    ('index', 'doc-tempest.tex', 'Tempest Testing Project',
+     'OpenStack Foundation', 'manual'),
 ]
 
-# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
 latex_use_xindy = False
+
+latex_elements = {
+    'maxlistdepth': 20,
+    'printindex': '\\footnotesize\\raggedright\\printindex'
+}
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 36828e0..c790c5f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -153,6 +153,11 @@
 
 Pre-Provisioned Credentials are also known as accounts.yaml or accounts file.
 
+Keystone Scopes & Roles Support in Tempest
+""""""""""""""""""""""""""""""""""""""""""
+For details on scope and roles support in Tempest,
+please refer to :doc:`this document <keystone_scopes_and_roles_support>`.
+
 Compute
 -------
 
@@ -193,10 +198,6 @@
 There are also options in the ``scenario`` section for images:
 
 #. ``img_file``
-#. ``img_dir``
-#. ``aki_img_file``
-#. ``ari_img_file``
-#. ``ami_img_file``
 #. ``img_container_format``
 #. ``img_disk_format``
 
@@ -205,13 +206,9 @@
 Tempest where an image file is located and describe its metadata for when it is
 uploaded.
 
-The behavior of these options is a bit convoluted (which will likely be fixed in
-future versions). You first need to specify ``img_dir``, which is the directory
-in which Tempest will look for the image files. First, it will check if the
-filename set for ``img_file`` could be found in ``img_dir``. If it is found then
-the ``img_container_format`` and ``img_disk_format`` options are used to upload
-that image to glance. However, if it is not found, Tempest will look for the
-three uec image file name options as a fallback. If neither is found, the tests
+You first need to specify the full path of the image using the ``img_file``
+option. If it is found, the ``img_container_format`` and ``img_disk_format``
+options are used to upload that image to glance. If it is not found, the tests
 requiring an image to upload will fail.
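+
+For example, a minimal ``[scenario]`` section might look like the following
+(the image path here is only an illustration and depends on where the image
+file lives in your environment)::
+
+    [scenario]
+    img_file = /opt/stack/devstack/files/cirros-0.5.1-x86_64-disk.img
+    img_disk_format = qcow2
+    img_container_format = bare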
 
 It is worth pointing out that using `cirros`_ is a very good choice for running
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000..139f0b7
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,58 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+The sections below cover the more project-specific information you need to get
+started with Tempest.
+
+Communication
+~~~~~~~~~~~~~
+* IRC channel ``#openstack-qa`` at OFTC
+* Mailing list (prefix subjects with ``[qa]`` for faster responses)
+  http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+Please refer to the `Tempest Core Team
+<https://review.opendev.org/#/admin/groups/42,members>`_ contacts.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+If you want to propose a new feature, please read the `Feature Proposal Process`_.
+Tempest features are tracked on `Launchpad BP <https://blueprints.launchpad.net/tempest>`_.
+
+Task Tracking
+~~~~~~~~~~~~~
+We track our tasks in `Launchpad <https://bugs.launchpad.net/tempest>`_.
+
+If you're looking for some smaller, easier work item to pick up and get started
+on, search for the 'low-hanging-fruit' tag.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+Did you find an issue and want to make sure we are aware of it? You can do so
+on `Launchpad <https://bugs.launchpad.net/tempest/+filebug>`__.
+More info about Launchpad usage can be found on the `OpenStack docs page
+<https://docs.openstack.org/contributors/common/task-tracking.html#launchpad>`_.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+All changes proposed to Tempest require a single ``Code-Review +2`` vote from a
+Tempest core reviewer, who approves the change by also giving a ``Workflow +1``
+vote. More detailed guidelines for reviewers are available at :doc:`../REVIEWING`.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
+
+The Release Process for QA is documented in `QA Release Process
+<https://wiki.openstack.org/wiki/QA/releases>`_.
+
+.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions
diff --git a/doc/source/data/tempest-blacklisted-plugins-registry.header b/doc/source/data/tempest-non-active-plugins-registry.header
similarity index 67%
rename from doc/source/data/tempest-blacklisted-plugins-registry.header
rename to doc/source/data/tempest-non-active-plugins-registry.header
index 6b6af11..06d8eaa 100644
--- a/doc/source/data/tempest-blacklisted-plugins-registry.header
+++ b/doc/source/data/tempest-non-active-plugins-registry.header
@@ -1,7 +1,7 @@
-Blacklisted Plugins
+Non Active Plugins
 ===================
 
 List of Tempest plugin projects that are stale or unmaintained for a long
-time (6 months or more). They can be moved out of blacklist state once one
+time (6 months or more). They can be moved out of the non-active state once one
 of the relevant patches gets merged:
 https://review.opendev.org/#/q/topic:tempest-sanity-gate+%28status:open%29
diff --git a/doc/source/index.rst b/doc/source/index.rst
index ab994d1..2f29cf2 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -56,6 +56,23 @@
 
    supported_version
 
+Description of Tests
+--------------------
+.. toctree::
+   :maxdepth: 2
+
+   tests/modules
+
+For Contributors
+================
+
+* If you are a new contributor to Tempest, please refer to :doc:`contributor/contributing`.
+
+.. toctree::
+   :hidden:
+
+   contributor/contributing
+
 Developers Guide
 ================
 
@@ -70,6 +87,7 @@
    microversion_testing
    test_removal
    write_tests
+   requirement_upper_constraint_for_tempest
 
 Plugins
 -------
@@ -77,8 +95,39 @@
 .. toctree::
    :maxdepth: 2
 
-   plugin
-   plugin-registry
+   plugins/index
+
+Tempest & Plugins Compatible Version Policy
+-------------------------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   tempest_and_plugins_compatible_version_policy
+
+Keystone Scopes & Roles Support in Tempest
+------------------------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   keystone_scopes_and_roles_support
+
+Stable Branch Support Policy
+----------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   stable_branch_support_policy
+
+Stable Branch Testing Policy
+----------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   stable_branch_testing_policy
 
 Library
 -------
@@ -88,14 +137,6 @@
 
    library
 
-Support Policy
---------------
-
-.. toctree::
-   :maxdepth: 2
-
-   stable_branch_support_policy
-
 Search
 ======
 
@@ -104,4 +145,4 @@
   * :ref:`Tempest document search <search>`: Search the contents of this document.
 
 * `OpenStack wide search <https://docs.openstack.org>`_: Search the wider
-  set of OpenStack documentation, including forums.
\ No newline at end of file
+  set of OpenStack documentation, including forums.
diff --git a/doc/source/keystone_scopes_and_roles_support.rst b/doc/source/keystone_scopes_and_roles_support.rst
new file mode 100644
index 0000000..f446f8c
--- /dev/null
+++ b/doc/source/keystone_scopes_and_roles_support.rst
@@ -0,0 +1,286 @@
+Keystone Scopes & Roles Support in Tempest
+==========================================
+
+OpenStack Keystone supports different scopes in tokens; refer to the
+`Keystone doc <https://docs.openstack.org/keystone/latest/admin/tokens-overview.html#authorization-scopes>`_.
+Along with the scopes, Keystone supports default roles, one of which
+is a reader role; for details, refer to
+`this keystone document <https://docs.openstack.org/keystone/latest/admin/service-api-protection.html>`_.
+
+Tempest supports credentials with those scopes and roles, which can be
+used to test APIs under different scopes and roles.
+
+Dynamic Credentials
+-------------------
+
+Dynamic credentials support all of the personas listed below and allow
+you to generate credentials tailored to a specific persona for use in
+your tests.
+
+Domain scoped personas:
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+  #. Domain Admin: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['domain_admin']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_d_admin_client = (
+                     cls.os_domain_admin.availability_zone_client)
+
+  #. Domain Member: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['domain_member']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_d_member_client = (
+                     cls.os_domain_member.availability_zone_client)
+
+  #. Domain Reader: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['domain_reader']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_d_reader_client = (
+                     cls.os_domain_reader.availability_zone_client)
+
+  #. Domain other roles: This is supported and can be requested and used from
+     the test as below:
+
+     You need to use ``domain`` as the prefix in the credentials type, and
+     based on that, Tempest will create test users under the 'domain' scope.
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = [['domain_my_role1', 'my_own_role1', 'admin'],
+                            ['domain_my_role2', 'my_own_role2']]
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_d_role1_client = (
+                     cls.os_domain_my_role1.availability_zone_client)
+                 cls.az_d_role2_client = (
+                     cls.os_domain_my_role2.availability_zone_client)
+
+System scoped personas:
+^^^^^^^^^^^^^^^^^^^^^^^
+
+  #. System Admin: This is supported and can be requested and used from the
+     test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['system_admin']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_s_admin_client = (
+                     cls.os_system_admin.availability_zone_client)
+
+  #. System Member: This is supported and can be requested and used from the
+     test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['system_member']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_s_member_client = (
+                     cls.os_system_member.availability_zone_client)
+
+  #. System Reader: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['system_reader']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_s_reader_client = (
+                     cls.os_system_reader.availability_zone_client)
+
+  #. System other roles: This is supported and can be requested and used from
+     the test as below:
+
+     You need to use ``system`` as the prefix in the credentials type, and
+     based on that, Tempest will create test users under the 'system' scope.
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = [['system_my_role1', 'my_own_role1', 'admin'],
+                            ['system_my_role2', 'my_own_role2']]
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_s_role1_client = (
+                     cls.os_system_my_role1.availability_zone_client)
+                 cls.az_s_role2_client = (
+                     cls.os_system_my_role2.availability_zone_client)
+
+Project scoped personas:
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+  #. Project Admin: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_admin']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_admin_client = (
+                     cls.os_project_admin.availability_zone_client)
+
+  #. Project Member: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_member']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_member_client = (
+                     cls.os_project_member.availability_zone_client)
+
+  #. Project Reader: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_reader']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_reader_client = (
+                     cls.os_project_reader.availability_zone_client)
+
+  #. Project alternate Admin: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_alt_admin']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_alt_admin_client = (
+                     cls.os_project_alt_admin.availability_zone_client)
+
+  #. Project alternate Member: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_alt_member']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_alt_member_client = (
+                     cls.os_project_alt_member.availability_zone_client)
+
+  #. Project alternate Reader: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_alt_reader']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_alt_reader_client = (
+                     cls.os_project_alt_reader.availability_zone_client)
+
+  #. Project other roles: This is supported and can be requested and used from
+     the test as below:
+
+     You need to use ``project`` as the prefix in the credentials type, and
+     based on that, Tempest will create test users under the 'project' scope.
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = [['project_my_role1', 'my_own_role1', 'admin'],
+                            ['project_my_role2', 'my_own_role2']]
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_role1_client = (
+                     cls.os_project_my_role1.availability_zone_client)
+                 cls.az_role2_client = (
+                     cls.os_project_my_role2.availability_zone_client)
+
+Pre-Provisioned Credentials
+---------------------------
+
+Pre-Provisioned credentials support the below set of personas and can be
+used in tests as shown above in the ``Dynamic Credentials`` section.
+
+* Domain Admin
+* Domain Member
+* Domain Reader
+* System Admin
+* System Member
+* System Reader
+* Project Admin
+* Project Member
+* Project Reader
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index b4f06e3..0b80b72 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -126,16 +126,16 @@
 
 .. code-block:: python
 
-    class BaseTestCase1(api_version_utils.BaseMicroversionTest):
+   class BaseTestCase1(api_version_utils.BaseMicroversionTest):
 
-        [..]
-    @classmethod
-    def skip_checks(cls):
-        super(BaseTestCase1, cls).skip_checks()
-        api_version_utils.check_skip_with_microversion(cls.min_microversion,
-                                                       cls.max_microversion,
-                                                       CONF.compute.min_microversion,
-                                                       CONF.compute.max_microversion)
+       [..]
+       @classmethod
+       def skip_checks(cls):
+           super(BaseTestCase1, cls).skip_checks()
+           api_version_utils.check_skip_with_microversion(cls.min_microversion,
+                                                          cls.max_microversion,
+                                                          CONF.compute.min_microversion,
+                                                          CONF.compute.max_microversion)
 
 Skip logic can be added in tests base class or any specific test class depends on
 tests class structure.
@@ -302,6 +302,10 @@
 
   .. _2.2: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id2
 
+  * `2.3`_
+
+  .. _2.3: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-kilo
+
   * `2.6`_
 
   .. _2.6: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id5
@@ -352,15 +356,15 @@
 
   * `2.37`_
 
-  .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id34
+  .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id35
 
   * `2.39`_
 
-  .. _2.39: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id35
+  .. _2.39: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id36
 
   * `2.41`_
 
-  .. _2.41: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id37
+  .. _2.41: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id38
 
   * `2.42`_
 
@@ -368,15 +372,15 @@
 
   * `2.47`_
 
-  .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42
+  .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
 
   * `2.48`_
 
-  .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
+  .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
 
   * `2.49`_
 
-  .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
+  .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id45
 
   * `2.53`_
 
@@ -384,15 +388,19 @@
 
   * `2.54`_
 
-  .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id49
+  .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
 
   * `2.55`_
 
-  .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
+  .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id51
 
   * `2.57`_
 
-  .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id52
+  .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id53
+
+  * `2.59`_
+
+  .. _2.59: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id55
 
   * `2.60`_
 
@@ -400,19 +408,27 @@
 
   * `2.61`_
 
-  .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id55
+  .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id56
 
   * `2.63`_
 
-  .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id57
+  .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id58
 
   * `2.70`_
 
-  .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id63
+  .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
 
   * `2.71`_
 
-  .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
+  .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id65
+
+  * `2.73`_
+
+  .. _2.73: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id66
+
+  * `2.79`_
+
+  .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train
 
 * Volume
 
@@ -447,3 +463,7 @@
   * `3.20`_
 
   .. _3.20:  https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id19
+
+  * `3.55`_
+
+  .. _3.55:  https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#maximum-in-rocky
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index e51b90b..2eaf72f 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -113,7 +113,7 @@
    There is also the option to use `stestr`_ directly. For example, from
    the workspace dir run::
 
-    $ stestr run --black-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
+    $ stestr run --exclude-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
 
    will run the same set of tests as the default gate jobs. Or you can
    use `unittest`_ compatible test runners such as `stestr`_, `pytest`_ etc.
diff --git a/doc/source/plugins/index.rst b/doc/source/plugins/index.rst
new file mode 100644
index 0000000..f961ac7
--- /dev/null
+++ b/doc/source/plugins/index.rst
@@ -0,0 +1,40 @@
+=====================
+Tempest Plugins Guide
+=====================
+
+.. toctree::
+   :maxdepth: 2
+
+   plugin
+
+Stable Branch Support Policy
+----------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   ../stable_branch_support_policy
+
+Stable Branch Testing Policy
+----------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   ../stable_branch_testing_policy
+
+Tempest & Plugins Compatible Version Policy
+-------------------------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   ../tempest_and_plugins_compatible_version_policy
+
+Plugins Registry
+----------------
+
+.. toctree::
+   :maxdepth: 2
+
+   plugin-registry
diff --git a/doc/source/plugin.rst b/doc/source/plugins/plugin.rst
similarity index 97%
rename from doc/source/plugin.rst
rename to doc/source/plugins/plugin.rst
index a9e2059..b1fd6f8 100644
--- a/doc/source/plugin.rst
+++ b/doc/source/plugins/plugin.rst
@@ -31,6 +31,7 @@
 * tempest.common.credentials_factory
 * tempest.clients
 * tempest.test
+* tempest.scenario.manager
 
 If there is an interface from Tempest that you need to rely on in your plugin
 which is not listed above, it likely needs to be migrated to tempest.lib. In
@@ -43,7 +44,7 @@
 In order to create the basic structure with base classes and test directories
 you can use the tempest-plugin-cookiecutter project::
 
-  > pip install -U cookiecutter && cookiecutter https://opendev.org/openstack/tempest-plugin-cookiecutter
+  > pip install -U cookiecutter && cookiecutter https://opendev.org/openstack/tempest-plugin-cookiecutter.git
 
   Cloning into 'tempest-plugin-cookiecutter'...
   remote: Counting objects: 17, done.
@@ -268,12 +269,12 @@
 
    class MyAPIClient(rest_client.RestClient):
 
-    def __init__(self, auth_provider, service, region,
-                 my_arg, my_arg2=True, **kwargs):
-        super(MyAPIClient, self).__init__(
-            auth_provider, service, region, **kwargs)
-        self.my_arg = my_arg
-        self.my_args2 = my_arg
+       def __init__(self, auth_provider, service, region,
+                    my_arg, my_arg2=True, **kwargs):
+           super(MyAPIClient, self).__init__(
+               auth_provider, service, region, **kwargs)
+           self.my_arg = my_arg
+           self.my_arg2 = my_arg2
 
 Finally the service client should be structured in a python module, so that all
 service client classes are importable from it. Each major API version should
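
For readers following along, a minimal sketch of how such a client could add an actual API call on top of the ``__init__`` shown above is given below. The resource name, URL layout and response handling for ``my_resources`` are illustrative placeholders, not an existing Tempest interface; only the ``rest_client.RestClient`` helpers (``get``, ``expected_success``, ``ResponseBody``) are real::

  import json

  from tempest.lib.common import rest_client


  class MyAPIClient(rest_client.RestClient):
      """Hypothetical client for a plugin-provided 'my_resources' API."""

      def __init__(self, auth_provider, service, region,
                   my_arg, my_arg2=True, **kwargs):
          super(MyAPIClient, self).__init__(
              auth_provider, service, region, **kwargs)
          self.my_arg = my_arg
          self.my_arg2 = my_arg2

      def show_my_resource(self, resource_id):
          # GET my_resources/{id} -- the URL layout is an assumption made
          # for this example only.
          resp, body = self.get('my_resources/%s' % resource_id)
          self.expected_success(200, resp.status)
          body = json.loads(body)
          return rest_client.ResponseBody(resp, body)
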
diff --git a/doc/source/requirement_upper_constraint_for_tempest.rst b/doc/source/requirement_upper_constraint_for_tempest.rst
new file mode 100644
index 0000000..2eebdda
--- /dev/null
+++ b/doc/source/requirement_upper_constraint_for_tempest.rst
@@ -0,0 +1,56 @@
+Requirements Upper Constraint for Tempest
+=========================================
+
+Tempest is branchless: supported stable branches use Tempest
+master, and all EM stable branches use an older compatible Tempest version
+for their testing. This means the upper-constraints installed with OpenStack
+might not be compatible with the Tempest version used for stable branch testing.
+For example, if Tempest master is used for testing stable/stein,
+then the stable/stein constraints might not be compatible with Tempest master, so
+we need to use the master upper-constraints there. That is why we use a virtual
+env for installing Tempest and running the tests, so that we can control the
+constraints Tempest requires independently of the system-wide installed constraints.
+
+Devstack takes care of using the master upper-constraints when Tempest master
+is used. But when an old Tempest is used, devstack alone cannot handle the
+compatible constraints, because Tempest's in-tree tox.ini also sets the
+upper-constraints, which are the master ones; if devstack sets constraints
+different from what we have in tox.ini, we end up re-creating the venv, which
+flushes all previously installed Tempest plugins from that venv. More details are
+in `this ML thread <http://lists.openstack.org/pipermail/openstack-discuss/2020-April/014388.html>`_
+
+There are two ways to solve this problem:
+
+#. Set UPPER_CONSTRAINTS_FILE to the compatible constraints path.
+   This option is not easy, as it requires setting this env var everywhere the
+   Tempest tox env is used: devstack, grenade, the project side, zuulv3 roles, etc.
+
+#. Pin the upper-constraints in tox.ini.
+   If we pin the upper-constraints in tox.ini on every release with the branch
+   constraints at the time of release, then we can solve this easily because tox
+   can use the compatible constraints at venv creation time. But this can
+   again mismatch with the constraints set by devstack, so we need to follow the
+   process below to make it work.
+
+How to pin upper-constraints in tox.ini
+---------------------------------------
+
+This has to be done right before we cut the new Tempest major version
+release for the cycle.
+
+Step1: Add the pin constraint proposal to the `QA office hour <https://wiki.openstack.org/wiki/Meetings/QATeamMeeting#Agenda_for_next_Office_hours>`_ agenda.
+       The pin constraint proposal includes:
+
+       - the pin constraint patch. `Example patch 720578 <https://review.opendev.org/#/c/720578/>`_
+       - the revert of the pin constraint patch. `Example patch 721724 <https://review.opendev.org/#/c/721724/>`_
+
+Step2: Approve the pin constraint patch and its revert together.
+       During the office hour we need to check that there are no open patches for the
+       Tempest release and then fast approve the 'pin constraint' patch and its
+       revert during the office hour itself. Remember the 'pin constraint patch' has to be
+       the last commit included in the Tempest release.
+
+Step3: Use the 'pin constraint patch' hash for the new Tempest release.
+       By using the 'pin constraint patch' hash we make sure tox.ini in the released
+       Tempest tag has the compatible stable constraints, not the master ones.
+       For example: `Tempest 24.0.0 <https://opendev.org/openstack/tempest/src/tag/24.0.0/tox.ini#L14>`_
diff --git a/doc/source/sampleconf.rst b/doc/source/sampleconf.rst
index c290140..45164a3 100644
--- a/doc/source/sampleconf.rst
+++ b/doc/source/sampleconf.rst
@@ -10,4 +10,6 @@
 
 The sample configuration can also be viewed in `file form <_static/tempest.conf.sample>`_.
 
-.. literalinclude:: _static/tempest.conf.sample
+.. only:: html
+
+   .. literalinclude:: _static/tempest.conf.sample
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
index 87e3ad1..9c2d1ed 100644
--- a/doc/source/stable_branch_support_policy.rst
+++ b/doc/source/stable_branch_support_policy.rst
@@ -20,7 +20,7 @@
 testing branches in these phases, it's possible that we'll introduce changes to
 Tempest on master which will break support on *Extended Maintenance* phase
 branches. When this happens the expectation for those branches is to either
-switch to running Tempest from a tag with support for the branch, or blacklist
+switch to running Tempest from a tag with support for the branch, or exclude
 a newly introduced test (if that is the cause of the issue). Tempest will not
 be creating stable branches to support *Extended Maintenance* phase branches, as
 the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
diff --git a/doc/source/stable_branch_testing_policy.rst b/doc/source/stable_branch_testing_policy.rst
new file mode 100644
index 0000000..02c5338
--- /dev/null
+++ b/doc/source/stable_branch_testing_policy.rst
@@ -0,0 +1,33 @@
+Stable Branch Testing Policy
+============================
+
+Tempest and its plugins need to support the stable branches
+as per :doc:`Stable Branch Support Policy </stable_branch_support_policy>`.
+
+Because of the branchless model of Tempest and its plugins, all the supported
+stable branches use the master version of Tempest and the plugins for their
+testing. That is done in devstack by using the `master branch
+<https://opendev.org/openstack/devstack/src/commit/c104afec7dd72edfd909847bee9c14eaf077a28b/stackrc#L314>`_
+for the Tempest installation. To make sure the master version of Tempest or a
+plugin (for any change or newly added test) stays compatible with all
+the supported stable branches, Tempest and its plugins need to
+add the stable branch jobs on the master gate. That way the stable
+branches are tested against master code and supported branches are not broken
+accidentally.
+
+Example:
+
+* `Stable jobs on Tempest master
+  <https://opendev.org/openstack/tempest/src/commit/e8f1876aa6772077f85f380677b30251c2454505/.zuul.yaml#L646-L651>`_.
+
+* `Stable job on neutron tempest plugins
+  <https://opendev.org/openstack/neutron-tempest-plugin/src/commit/4bc1b00213cf660648cad1916fe6497ac29b2e78/.zuul.yaml#L1427-L1428>`_
+
+Once a stable branch is moved to the `Extended Maintenance Phases`_
+and devstack starts using an older Tempest version for that stable
+branch's testing, we can remove that stable branch job from the master
+gate.
+
+Example: https://review.opendev.org/#/c/722183/
+
+.. _Extended Maintenance Phases: https://docs.openstack.org/project-team-guide/stable-branches.html#extended-maintenance
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 4f65fd4..4ca7f0d 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,9 +9,9 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* Victoria
+* Ussuri
 * Train
-* Stein
-* Rocky
 
 For older OpenStack Release:
 
@@ -34,3 +34,4 @@
 
 * Python 3.6
 * Python 3.7
+* Python 3.8
diff --git a/doc/source/tempest_and_plugins_compatible_version_policy.rst b/doc/source/tempest_and_plugins_compatible_version_policy.rst
new file mode 100644
index 0000000..942b1bd
--- /dev/null
+++ b/doc/source/tempest_and_plugins_compatible_version_policy.rst
@@ -0,0 +1,54 @@
+Tempest and Plugins compatible version policy
+=============================================
+
+Tempest and its plugins are responsible for the integrated
+testing of OpenStack. These tools have two use cases:
+
+#. Testing upstream code at gate
+#. Testing Production Cloud
+
+Upstream code is tested by the master version of branchless Tempest & plugins
+for all supported stable branches in `Maintained phase`_.
+
+A production cloud can be tested using either the compatible version or the
+master version, depending on the testing strategy of the cloud. To provide
+a compatible version of Tempest and its plugins per OpenStack release,
+we started the coordinated release of Tempest and all plugins per OpenStack
+release.
+These versions are the first set of versions of Tempest and its plugins to
+officially start the support of a particular OpenStack release. For example,
+the first compatible versions for the OpenStack Train release: `Tempest plugins version`_.
+
+Because of the branchless nature of Tempest and its plugins, the first version
+released during an OpenStack release is not the last version to support that
+OpenStack release. This means the next (or master) versions can also be used
+for upstream testing as well as for production testing.
+
+Since the `Extended Maintenance policy`_ for stable branches was adopted, Tempest
+started releasing an ``end of support`` version once a stable release is moved
+to the EM state, which used to happen at EOL of the stable release. This is
+the last compatible version of Tempest for the OpenStack release moved to EM.
+
+Because of the branchless nature explained above, we have a range of versions
+which can be considered compatible versions for a particular OpenStack release.
+How we should release those versions is described in the table below.
+
+ +-----------------------------+-----------------+------------------------------------+
+ | First compatible version -> | OpenStack 'XYZ' | <- Last compatible version         |
+ +=============================+=================+====================================+
+ |This is the latest version   |                 |This is the version released        |
+ |released when OpenStack      |                 |when OpenStack 'XYZ' is moved to    |
 |'XYZ' is released.           |                 |EM state. The hash should be the    |
 |Example:                     |                 |master hash at the time the branch  |
 |`Tempest plugins version`_   |                 |is moved to EM, not the First       |
 |                             |                 |compatible version hash.            |
+ +-----------------------------+-----------------+------------------------------------+
+
+Tempest and the plugins should follow the above-mentioned policy for the
+``First compatible version`` and the ``Last compatible version``
+so that we provide the right set of compatible versions for upstream as well as for
+production cloud testing.
+
+.. _Maintained phase: https://docs.openstack.org/project-team-guide/stable-branches.html#maintained
+.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
+.. _Tempest plugins version: https://releases.openstack.org/train/#tempest-plugins
diff --git a/doc/source/test_removal.rst b/doc/source/test_removal.rst
index ff4fa09..a3bb645 100644
--- a/doc/source/test_removal.rst
+++ b/doc/source/test_removal.rst
@@ -105,16 +105,16 @@
 value.
 
 However, for the 3rd prong verification is a bit more subjective. The original
-intent of this prong was mostly for refstack/defcore and also for things that
+intent of this prong was mostly for interop/refstack and also for things that
 running on the stable branches. We don't want to remove any tests if that
 would break our API consistency checking between releases, or something that
-defcore/refstack is depending on being in Tempest. It's worth pointing out
-that if a test is used in `defcore`_ as part of `interop`_ testing then it will
-probably have continuing value being in Tempest as part of the
+interop/refstack is depending on being in Tempest. It's worth pointing out
+that if a test is used in `interop_wg`_ as part of `interop`_ testing then it
+will probably have continuing value being in Tempest as part of the
 integration/integrated tests in general. This is one area where some overlap
 is expected between testing in projects and Tempest, which is not a bad thing.
 
-.. _defcore: https://wiki.openstack.org/wiki/Governance/InteropWG
+.. _interop_wg: https://docs.opendev.org/openinfra/interop/latest/
 .. _interop: https://www.openstack.org/brand/interop
 
 Discussing the 3rd prong
diff --git a/doc/source/tests/modules.rst b/doc/source/tests/modules.rst
new file mode 100644
index 0000000..026a7a5
--- /dev/null
+++ b/doc/source/tests/modules.rst
@@ -0,0 +1,21 @@
+Description of Tests
+====================
+
+OpenStack Services Integration Tests
+------------------------------------
+.. toctree::
+   :maxdepth: 2
+
+   scenario/modules
+
+OpenStack Services API Tests
+----------------------------
+.. toctree::
+   :maxdepth: 2
+
+   compute/modules
+   identity/modules
+   image/modules
+   network/modules
+   object_storage/modules
+   volume/modules
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 0a29b7b..34df089 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -76,54 +76,54 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-    @classmethod
-    def skip_checks(cls):
-        """This section is used to evaluate config early and skip all test
-           methods based on these checks
-        """
-        super(TestExampleCase, cls).skip_checks()
-        if not CONF.section.foo
-            cls.skip('A helpful message')
+      @classmethod
+      def skip_checks(cls):
+          """This section is used to evaluate config early and skip all test
+             methods based on these checks
+          """
+          super(TestExampleCase, cls).skip_checks()
+          if not CONF.section.foo
+              cls.skip('A helpful message')
 
-    @classmethod
-    def setup_credentials(cls):
-        """This section is used to do any manual credential allocation and also
-           in the case of dynamic credentials to override the default network
-           resource creation/auto allocation
-        """
-        # This call is used to tell the credential allocator to not create any
-        # network resources for this test case. It also enables selective
-        # creation of other neutron resources. NOTE: it must go before the
-        # super call
-        cls.set_network_resources()
-        super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          """This section is used to do any manual credential allocation and also
+             in the case of dynamic credentials to override the default network
+             resource creation/auto allocation
+          """
+          # This call is used to tell the credential allocator to not create any
+          # network resources for this test case. It also enables selective
+          # creation of other neutron resources. NOTE: it must go before the
+          # super call
+          cls.set_network_resources()
+          super(TestExampleCase, cls).setup_credentials()
 
-    @classmethod
-    def setup_clients(cls):
-        """This section is used to setup client aliases from the manager object
-           or to initialize any additional clients. Except in a few very
-           specific situations you should not need to use this.
-        """
-        super(TestExampleCase, cls).setup_clients()
-        cls.servers_client = cls.os_primary.servers_client
+      @classmethod
+      def setup_clients(cls):
+          """This section is used to setup client aliases from the manager object
+             or to initialize any additional clients. Except in a few very
+             specific situations you should not need to use this.
+          """
+          super(TestExampleCase, cls).setup_clients()
+          cls.servers_client = cls.os_primary.servers_client
 
-    @classmethod
-    def resource_setup(cls):
-        """This section is used to create any resources or objects which are
-           going to be used and shared by **all** test methods in the
-           TestCase. Note then anything created in this section must also be
-           destroyed in the corresponding resource_cleanup() method (which will
-           be run during tearDownClass())
-        """
-        super(TestExampleCase, cls).resource_setup()
-        cls.shared_server = cls.servers_client.create_server(...)
-        cls.addClassResourceCleanup(waiters.wait_for_server_termination,
-                                    cls.servers_client,
-                                    cls.shared_server['id'])
-        cls.addClassResourceCleanup(
-            test_utils.call_and_ignore_notfound_exc(
-                cls.servers_client.delete_server,
-                cls.shared_server['id']))
+      @classmethod
+      def resource_setup(cls):
+          """This section is used to create any resources or objects which are
+             going to be used and shared by **all** test methods in the
+             TestCase. Note then anything created in this section must also be
+             destroyed in the corresponding resource_cleanup() method (which will
+             be run during tearDownClass())
+          """
+          super(TestExampleCase, cls).resource_setup()
+          cls.shared_server = cls.servers_client.create_server(...)
+          cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+                                      cls.servers_client,
+                                      cls.shared_server['id'])
+          cls.addClassResourceCleanup(
+              test_utils.call_and_ignore_notfound_exc(
+                  cls.servers_client.delete_server,
+                  cls.shared_server['id']))
 
 .. _credentials:
 
@@ -150,9 +150,9 @@
 
         credentials = ['primary', 'admin']
 
-    @classmethod
-    def skip_checks(cls):
-    ...
+        @classmethod
+        def skip_checks(cls):
+            ...
 
 In this example the ``TestExampleAdmin`` TestCase will allocate 2 sets of
 credentials, one regular user and one admin user. The corresponding manager
@@ -225,10 +225,10 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-  @classmethod
-  def setup_credentials(cls):
-      cls.set_network_resources(network=True, subnet=True, router=False)
-      super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          cls.set_network_resources(network=True, subnet=True, router=False)
+          super(TestExampleCase, cls).setup_credentials()
 
 There are 2 quirks with the usage here. First for the set_network_resources
 function to work properly it **must be called before super()**. This is so
@@ -242,10 +242,10 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-  @classmethod
-  def setup_credentials(cls):
-      cls.set_network_resources()
-      super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          cls.set_network_resources()
+          super(TestExampleCase, cls).setup_credentials()
 
 This will not allocate any networking resources. This is because by default all
 the arguments default to False.
@@ -282,8 +282,8 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      self.os_primary.servers_client.create_server(...)
+      def test_example_create_server(self):
+          self.os_primary.servers_client.create_server(...)
 
 is all you need to do. As described previously, in the above example the
 ``self.os_primary`` is created automatically because the base test class sets the
@@ -305,8 +305,8 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      credentials = self.os_primary.credentials
+      def test_example_create_server(self):
+          credentials = self.os_primary.credentials
 
 The credentials object provides access to all of the credential information you
 would need to make API requests. For example, building off the previous
@@ -316,9 +316,9 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      credentials = self.os_primary.credentials
-      username = credentials.username
-      user_id = credentials.user_id
-      password = credentials.password
-      tenant_id = credentials.tenant_id
+      def test_example_create_server(self):
+          credentials = self.os_primary.credentials
+          username = credentials.username
+          user_id = credentials.user_id
+          password = credentials.password
+          tenant_id = credentials.tenant_id
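
Putting the pieces above together, a minimal end-to-end sketch could look like the following; the config checks, the server name, and the omission of waiters and cleanup are simplifications made for illustration only::

  from tempest import config
  from tempest import test

  CONF = config.CONF


  class TestExampleCase(test.BaseTestCase):

      credentials = ['primary']

      @classmethod
      def skip_checks(cls):
          super(TestExampleCase, cls).skip_checks()
          if not CONF.service_available.nova:
              raise cls.skipException('nova is not available')

      @classmethod
      def setup_clients(cls):
          super(TestExampleCase, cls).setup_clients()
          cls.servers_client = cls.os_primary.servers_client

      def test_example_create_server(self):
          # Credentials of the automatically allocated primary user are
          # available if the test needs them directly.
          username = self.os_primary.credentials.username
          # Waiting for ACTIVE state and cleanup are omitted in this sketch.
          self.servers_client.create_server(
              name='example-server-%s' % username,
              imageRef=CONF.compute.image_ref,
              flavorRef=CONF.compute.flavor_ref)
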
diff --git a/etc/whitelist.yaml b/etc/allow-list.yaml
similarity index 100%
rename from etc/whitelist.yaml
rename to etc/allow-list.yaml
diff --git a/etc/rbac-persona-accounts.yaml.sample b/etc/rbac-persona-accounts.yaml.sample
new file mode 100644
index 0000000..0b59538
--- /dev/null
+++ b/etc/rbac-persona-accounts.yaml.sample
@@ -0,0 +1,108 @@
+- user_domain_name: Default
+  password: password
+  roles:
+    - admin
+  username: tempest-system-admin-1
+  system: all
+- user_domain_name: Default
+  password: password
+  username: tempest-system-member-1
+  roles:
+    - member
+  system: all
+- user_domain_name: Default
+  password: password
+  username: tempest-system-reader-1
+  roles:
+    - reader
+  system: all
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-admin-1
+  roles:
+    - admin
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-member-1
+  roles:
+    - member
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-reader-1
+  roles:
+    - reader
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-admin-1
+  roles:
+    - admin
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-member-1
+  roles:
+    - member
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-reader-1
+  roles:
+    - reader
+- user_domain_name: Default
+  password: password
+  username: tempest-system-admin-2
+  roles:
+    - admin
+  system: all
+- user_domain_name: Default
+  password: password
+  username: tempest-system-member-2
+  roles:
+    - member
+  system: all
+- user_domain_name: Default
+  password: password
+  system: all
+  username: tempest-system-reader-2
+  roles:
+    - reader
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-admin-2
+  roles:
+    - admin
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-member-2
+  roles:
+    - member
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-reader-2
+  roles:
+    - reader
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-admin-2
+  roles:
+    - admin
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-member-2
+  roles:
+    - member
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-reader-2
+  roles:
+    - reader
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index 5f72345..568077e 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -7,11 +7,6 @@
 
 # We run tests only on one node, regardless how many nodes are in the system
 - hosts: tempest
-  environment:
-    # This enviroment variable is used by the optional tempest-gabbi
-    # job provided by the gabbi-tempest plugin. It can be safely ignored
-    # if that plugin is not being used.
-    GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
   roles:
     - setup-tempest-run-dir
     - setup-tempest-data-dir
@@ -20,5 +15,18 @@
     # IPv6 only env for example Devstack IPv6 settings and services listen
     # address is IPv6 etc. This is invoked before tests are run so that we can
     # fail early if anything missing the IPv6 settings or deployments.
-    - ipv6-only-deployments-verification
-    - run-tempest
+    - devstack-ipv6-only-deployments-verification
+  tasks:
+    - name: Run Tempest version <= 26.0.0
+      include_role:
+        name: run-tempest-26
+      when:
+        - zuul.branch is defined
+        - zuul.branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+    - name: Run Tempest
+      include_role:
+        name: run-tempest
+      when:
+        - zuul.branch is defined
+        - zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 5f87abd..269999c 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -7,13 +7,48 @@
 
 # We run tests only on one node, regardless how many nodes are in the system
 - hosts: tempest
-  environment:
-    # This enviroment variable is used by the optional tempest-gabbi
-    # job provided by the gabbi-tempest plugin. It can be safely ignored
-    # if that plugin is not being used.
-    GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
-  roles:
-    - setup-tempest-run-dir
-    - setup-tempest-data-dir
-    - acl-devstack-files
-    - run-tempest
+  tasks:
+    - name: Setup Tempest Run Directory
+      include_role:
+        name: setup-tempest-run-dir
+
+    - name: Setup Tempest Data Directory
+      include_role:
+        name: setup-tempest-data-dir
+
+    - name: ACL devstack files
+      include_role:
+        name: acl-devstack-files
+
+    - name: Run tempest cleanup init-saved-state
+      include_role:
+        name: tempest-cleanup
+      vars:
+        init_saved_state: true
+      when: (run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool) or
+            (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
+            (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
+
+    - name: Run Tempest version <= 26.0.0
+      include_role:
+        name: run-tempest-26
+      when: (zuul.branch is defined and zuul.branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]) or
+            (zuul.override_checkout is defined and zuul.override_checkout in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"])
+
+    - name: Run Tempest
+      include_role:
+        name: run-tempest
+      when: (zuul.branch is defined and zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"] and zuul.override_checkout is not defined) or
+            (zuul.override_checkout is defined and zuul.override_checkout not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"])
+
+    - name: Run tempest cleanup dry-run
+      include_role:
+        name: tempest-cleanup
+      vars:
+        dry_run: true
+      when: run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool
+
+    - name: Run tempest cleanup
+      include_role:
+        name: tempest-cleanup
+      when: run_tempest_cleanup is defined and run_tempest_cleanup | bool
diff --git a/releasenotes/notes/25/subunt-describe-call-verbose-arg-fix.yaml b/releasenotes/notes/25/subunt-describe-call-verbose-arg-fix.yaml
new file mode 100644
index 0000000..d2a644e
--- /dev/null
+++ b/releasenotes/notes/25/subunt-describe-call-verbose-arg-fix.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+  - |
+    Fixed bug #1890060: tempest subunit_describe_calls --verbose was not working with the Cliff CLI.
+    The subunit_describe_calls --verbose argument was a boolean and worked in the non-Cliff CLI,
+    which is now deprecated, but does not work with cliff since --verbose is a standard cliff
+    argument which is an int. Since the tool is in the lib directory we cannot change the interface,
+    so we add a new argument, -a/--all-stdout, that allows the cliff CLI to support the
+    feature in subunit_describe_calls to print request and response headers and bodies
+    to stdout.
\ No newline at end of file
diff --git a/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml b/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml
new file mode 100644
index 0000000..33df7c4
--- /dev/null
+++ b/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    The following tempest clients for keystone v3 OS_FEDERATION API were
+    implemented in this release
+
+    * identity_providers
+    * protocols
+    * mappings
+    * service_providers
diff --git a/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml b/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml
new file mode 100644
index 0000000..8069bd3
--- /dev/null
+++ b/releasenotes/notes/Add-volume_size_extend-config--opt-for-volume-tests-041f7d25fc2f3e05.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    Add a new config option for volume tests which allows specifying the size
+    a volume will be extended by (if a test does extend a volume or needs
+    a new, bigger volume). The option is beneficial in case such tests are
+    executed on systems where the chunk size (the minimum size a volume can be
+    extended by) is other than 1 (originally hardcoded in the tests):
+
+    CONF.volume.volume_size_extend
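
As a rough illustration of how the option might be consumed inside a volume test class (``self.volumes_client`` and the enclosing test case are assumptions, not part of the note)::

  from tempest.common import waiters
  from tempest import config

  CONF = config.CONF

  def _extend_volume(self, volume):
      # Grow the volume by the configured chunk instead of a hardcoded 1 GiB.
      new_size = volume['size'] + CONF.volume.volume_size_extend
      self.volumes_client.extend_volume(volume['id'], new_size=new_size)
      waiters.wait_for_volume_resource_status(
          self.volumes_client, volume['id'], 'available')
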
diff --git a/releasenotes/notes/Fix-KeyError-bug-in-v3-volumes_client-ff5d9b894f2257c8.yaml b/releasenotes/notes/Fix-KeyError-bug-in-v3-volumes_client-ff5d9b894f2257c8.yaml
new file mode 100644
index 0000000..bbb1901
--- /dev/null
+++ b/releasenotes/notes/Fix-KeyError-bug-in-v3-volumes_client-ff5d9b894f2257c8.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+  - |
+    The is_resource_deleted method of the v3 volumes_client might have raised
+    a KeyError exception due to incorrectly accessing the volume id
+    in the case the volume was in the error_deleting state.
+    incorrect code - volume['id']
+    correct code - volume['volume']['id']
+    More details about the issue can be found at
+    https://bugs.launchpad.net/tempest/+bug/1887980
diff --git a/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml b/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml
new file mode 100644
index 0000000..089569e
--- /dev/null
+++ b/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml
@@ -0,0 +1,13 @@
+---
+deprecations:
+  - |
+    In this release the following tempest arguments are deprecated and
+    replaced by new ones which are functionally equivalent:
+
+     * --black-regex is replaced by --exclude-regex
+     * --blacklist-file is replaced by --exclude-list
+     * --whitelist-file is replaced by --include-list
+
+    For now Tempest supports both (the new and the old ones) in order to make the
+    transition smoother for all consumers. However, this is only a temporary
+    measure and the old options will be removed soon.
diff --git a/releasenotes/notes/Remove-deprecated-image-scenario-options-b573c60e873ab451.yaml b/releasenotes/notes/Remove-deprecated-image-scenario-options-b573c60e873ab451.yaml
new file mode 100644
index 0000000..018d01d
--- /dev/null
+++ b/releasenotes/notes/Remove-deprecated-image-scenario-options-b573c60e873ab451.yaml
@@ -0,0 +1,15 @@
+---
+upgrade:
+  - |
+    The following deprecated image scenario options are removed after a ~4
+    year deprecation period.
+
+    * ``ami_img_file``
+    * ``ari_img_file``
+    * ``aki_img_file``
+
+    Starting with the Tempest 25.0.0 release, CONF.scenario.img_file needs a full
+    path for the image. CONF.scenario.img_dir was deprecated and will be removed
+    in the next release. Until Tempest 25.0.0, the old behavior is maintained and
+    keeps working, but starting with Tempest 26.0.0, you need to specify the full
+    path in the CONF.scenario.img_file config option.
diff --git a/releasenotes/notes/Remove-deprecated-scenario.img_dir-option-da626e6153181e16.yaml b/releasenotes/notes/Remove-deprecated-scenario.img_dir-option-da626e6153181e16.yaml
new file mode 100644
index 0000000..2514e48
--- /dev/null
+++ b/releasenotes/notes/Remove-deprecated-scenario.img_dir-option-da626e6153181e16.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+  - |
+    The img_dir scenario option has been deprecated and is now removed.
+    Starting with the Tempest 25.0.0 release, CONF.scenario.img_file needs a full
+    path for the image. Until this release, the old behavior was maintained and
+    kept working; however, a user needs to specify the full path in the
+    CONF.scenario.img_file config option from now on.
diff --git a/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml b/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml
new file mode 100644
index 0000000..822df7d
--- /dev/null
+++ b/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    In this release tempest/manager.py is removed after more than 4 years
+    of deprecation.
diff --git a/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml b/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml
new file mode 100644
index 0000000..fb84d25
--- /dev/null
+++ b/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    The test_reboot_server_soft test has been skipped for more than 6 years
+    and is now removed. Take into account that the minimum scenario test uses
+    soft reboot and the nova functional tests also cover reboot.
diff --git a/releasenotes/notes/Set-default-of-operator_role-to-member-f9c3abd2ebde23b7.yaml b/releasenotes/notes/Set-default-of-operator_role-to-member-f9c3abd2ebde23b7.yaml
new file mode 100644
index 0000000..980f4ca
--- /dev/null
+++ b/releasenotes/notes/Set-default-of-operator_role-to-member-f9c3abd2ebde23b7.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    ``Member`` role has been deprecated and replaced by ``member``. Therefore
+    the default value of config option ``[object-storage].operator_role`` is
+    changed to ``member``. (Fixes bug #1330132)
diff --git a/releasenotes/notes/Stabilize-scenario-manager-adf36d21b08e31a4.yaml b/releasenotes/notes/Stabilize-scenario-manager-adf36d21b08e31a4.yaml
new file mode 100644
index 0000000..8df5f3c
--- /dev/null
+++ b/releasenotes/notes/Stabilize-scenario-manager-adf36d21b08e31a4.yaml
@@ -0,0 +1,31 @@
+---
+prelude: >
+    Tempest.scenario.manager is now a stable interface for Tempest plugins.
+features:
+  - |
+    In this release, we made tempest/scenario/manager.py a stable interface
+    ready to be consumed by all tempest plugins. The effort was tracked in
+    the following etherpad [1], and the related reviews can be listed via [2]:
+
+    * [1] https://etherpad.opendev.org/p/tempest-scenario-manager
+    * [2] https://review.opendev.org/#/q/topic:bp/tempest-scenario-manager-stable(status:open+OR+status:merged)
+
+    Some time ago, tempest/scenario/manager.py got copied to most of the plugins
+    and therefore, it diverged - every plugin's copy had slight differences.
+    In this release, we pushed changes to unify the manager's methods and
+    improved their APIs in order to make them easier to consume:
+
+    * we added implementations of methods that were often used in plugins'
+      manager.py but were not implemented in our manager
+    * we improved APIs by, e.g., adding a kwargs argument so that the consumers
+      are more in control of the data that are passed to tempest clients
+    * we modified logic of a few methods so that it complies with the plugins'
+      manager versions in order to prepare for a situation when the plugins
+      can reuse Tempest manager as much as possible rather than keeping their
+      own copy
+    * we made methods consistent w.r.t. names and parameters
+    * we split the lengthy methods to have more readable code
+    * previously private methods which had a potential to be reused were
+      made public so that it's clear they are expected to be used in tempest
+      plugins
+    * missing docstrings have been added
diff --git a/releasenotes/notes/account-generator-accepts-positive-numbers-only-33a366a297494ef7.yaml b/releasenotes/notes/account-generator-accepts-positive-numbers-only-33a366a297494ef7.yaml
new file mode 100644
index 0000000..dfee1db
--- /dev/null
+++ b/releasenotes/notes/account-generator-accepts-positive-numbers-only-33a366a297494ef7.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+   The concurrency parameter for the account-generator command was accepting
+   negative values and zero. The concurrency parameter now accepts only
+   positive numbers. When a negative value or zero is passed to the
+   program, the program exits and help is displayed.
diff --git a/releasenotes/notes/account_generator-6eb03f664a448c35.yaml b/releasenotes/notes/account_generator-6eb03f664a448c35.yaml
new file mode 100644
index 0000000..ade632f
--- /dev/null
+++ b/releasenotes/notes/account_generator-6eb03f664a448c35.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+  - |
+    Remove the deprecated CLI ``tempest-account-generator`` in favor of
+    ``tempest account-generator`` command.
+    You can use ``tempest account-generator`` CLI to generate the accounts
+    yaml file.
diff --git a/releasenotes/notes/add-alt-project-dynamic-creds-1a3bc543e65d9433.yaml b/releasenotes/notes/add-alt-project-dynamic-creds-1a3bc543e65d9433.yaml
new file mode 100644
index 0000000..de81b2b
--- /dev/null
+++ b/releasenotes/notes/add-alt-project-dynamic-creds-1a3bc543e65d9433.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Add project alternate admin, member and reader roles for dynamic credentials.
diff --git a/releasenotes/notes/add-assisted-volume-snapshot-client-737f5cb35d58c1b6.yaml b/releasenotes/notes/add-assisted-volume-snapshot-client-737f5cb35d58c1b6.yaml
new file mode 100644
index 0000000..5498688
--- /dev/null
+++ b/releasenotes/notes/add-assisted-volume-snapshot-client-737f5cb35d58c1b6.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Add a new client to handle requests to create and delete assisted volume snapshots.
diff --git a/releasenotes/notes/add-can-migrate-between-any-hosts-config-option-x8ah4f9737a28e9b.yaml b/releasenotes/notes/add-can-migrate-between-any-hosts-config-option-x8ah4f9737a28e9b.yaml
new file mode 100644
index 0000000..26fe01a
--- /dev/null
+++ b/releasenotes/notes/add-can-migrate-between-any-hosts-config-option-x8ah4f9737a28e9b.yaml
@@ -0,0 +1,9 @@
+---
+features:
+  - Add a new config option, can_migrate_between_any_hosts, in the
+    compute-feature-enabled section, which can be set to False for environments
+    with non-homogeneous compute nodes, so that a suitable destination
+    host for migration is selected automatically; otherwise the test case may
+    fail unexpectedly. For example, if the source host has CPU "E5-2699 v4" and
+    the selected target host has CPU "E5-2670 v3", the live migration will
+    fail because of the CPU downgrade issue.
diff --git a/releasenotes/notes/add-compute-feature-ide-bus-b63802502c378083.yaml b/releasenotes/notes/add-compute-feature-ide-bus-b63802502c378083.yaml
new file mode 100644
index 0000000..43a0e8c
--- /dev/null
+++ b/releasenotes/notes/add-compute-feature-ide-bus-b63802502c378083.yaml
@@ -0,0 +1,10 @@
+---
+other:
+  - |
+    A new ``[compute-feature-enabled]ide_bus`` config option has been
+    introduced to indicate if the environment supports attaching disks to an
+    instance using an ``IDE`` bus.
+
+    This currently defaults to ``True`` but should be set to ``False`` when
+    using the libvirt OpenStack Nova virt driver *and* the ``q35`` machine type
+    as support for this bus is no longer provided.
diff --git a/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
new file mode 100644
index 0000000..121e060
--- /dev/null
+++ b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add a new config option ``[compute-feature-enabled] shelve_migrate``
+    which enables tests for environments that support cold migration of qcow2
+    unshelved instances.
diff --git a/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml b/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml
new file mode 100644
index 0000000..1840c10
--- /dev/null
+++ b/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml
@@ -0,0 +1,13 @@
+---
+features:
+  - |
+    Added methods to the identity v3 roles client to support:
+
+    - PUT /v3/system/users/{user}/roles/{role}
+    - GET /v3/system/users/{user}/roles
+    - GET /v3/system/users/{user}/roles/{role}
+    - DELETE /v3/system/users/{user}/roles/{role}
+    - PUT /v3/system/groups/{group}/roles/{role}
+    - GET /v3/system/groups/{group}/roles
+    - GET /v3/system/groups/{group}/roles/{role}
+    - DELETE /v3/system/groups/{group}/roles/{role}
diff --git a/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml b/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml
new file mode 100644
index 0000000..159bbe8
--- /dev/null
+++ b/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - A new config option, image_alt_ssh_user, was added to the validation
+    section to specify the user name used to authenticate to an alternative
+    instance (an instance using image_ref_alt) in tests. By default this
+    is set to root.
+  - A new config option, image_alt_ssh_password, was added to the validation
+    section to specify the password used to authenticate to an alternative
+    instance (an instance using image_ref_alt) in tests. By default this
+    is set to password.
diff --git a/releasenotes/notes/add-keystone-ep-clients-eeefd0904fbbe151.yaml b/releasenotes/notes/add-keystone-ep-clients-eeefd0904fbbe151.yaml
new file mode 100644
index 0000000..2b407ae
--- /dev/null
+++ b/releasenotes/notes/add-keystone-ep-clients-eeefd0904fbbe151.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added missing client methods for keystone's OS-ENDPOINT-POLICY and
+    OS-EP-FILTER APIs.
diff --git a/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml b/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml
new file mode 100644
index 0000000..ab8d748
--- /dev/null
+++ b/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added missing clients and tests for keystone's v3 EC2 API which already
+    existed for keystone v2.
diff --git a/releasenotes/notes/add-placement-usage-client-method-8b6015cbd8a5e0f6.yaml b/releasenotes/notes/add-placement-usage-client-method-8b6015cbd8a5e0f6.yaml
new file mode 100644
index 0000000..d31a33c
--- /dev/null
+++ b/releasenotes/notes/add-placement-usage-client-method-8b6015cbd8a5e0f6.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Add a ``placement`` API usage method for evaluating resource class
+    utilization of a resource provider. The following API call is now
+    available for tempest in the resource_providers_client:
+
+    * GET /resource_providers/{uuid}/usages
diff --git a/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml b/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml
new file mode 100644
index 0000000..da58ba3
--- /dev/null
+++ b/releasenotes/notes/add-qos-apis-for-bandwidth-limit-rules-cc144660fcaa419a.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    Add "QoS bandwidth limit rules" APIs to:
+    "tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py"  module.
+
+    * List bandwidth limit rules for QoS policy
+    * Create bandwidth limit rule
+    * Show bandwidth limit rule details
+    * Update bandwidth limit rule
+    * Delete bandwidth limit rule
\ No newline at end of file
diff --git a/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml b/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml
new file mode 100644
index 0000000..b65b164
--- /dev/null
+++ b/releasenotes/notes/add-qos-minimum-packet-rule-client-c8bfe09873032d4a.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    Added QoS minimum packet rate rule client:
+
+    * create_minimum_packet_rate_rule
+    * update_minimum_packet_rate_rule
+    * show_minimum_packet_rate_rule
+    * list_minimum_packet_rate_rules
+    * delete_minimum_packet_rate_rule
diff --git a/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml b/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml
new file mode 100644
index 0000000..2cd5af6
--- /dev/null
+++ b/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add show type API to v3 types_client library.
+
+    * default_volume_type
diff --git a/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml b/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml
new file mode 100644
index 0000000..a1bd4c5
--- /dev/null
+++ b/releasenotes/notes/add-subnet-id-config-option-fac3d6f12abfc171.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - A new config option 'subnet_id' is added to the
+    'network' section to specify the subnet which should be used for
+    allocation of IPs for VMs created during testing.
+    It should be used when the tested network contains more
+    than one subnet, otherwise the test of external connectivity
+    will fail. (Fixes bug #1856671)
diff --git a/releasenotes/notes/add-volume-transfers-v3.55-73f75073ad2c4091.yaml b/releasenotes/notes/add-volume-transfers-v3.55-73f75073ad2c4091.yaml
new file mode 100644
index 0000000..c35dd67
--- /dev/null
+++ b/releasenotes/notes/add-volume-transfers-v3.55-73f75073ad2c4091.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Add a ``TransfersV355Client`` to the volume v3 ``transfer_client`` library
+    supporting create, list, show, delete, and accept operations for the `new
+    Volume Transfers API
+    <https://docs.openstack.org/api-ref/block-storage/v3/#volume-transfers-volume-transfers-3-55-or-later>`_
+    of the Block Storage API v3.  The min_microversion of this API is 3.55.
diff --git a/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml b/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml
new file mode 100644
index 0000000..6b10937
--- /dev/null
+++ b/releasenotes/notes/add-worker-file-option-d949121a61156968.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    Add the --worker-file option to the ``tempest run`` command. This gives
+    tempest more granularity for manually configuring how the different sets of
+    tests can be grouped to run on different workers. You can configure which
+    test regexes run under which workers. You can also mix manual scheduling
+    with the standard one by specifying a concurrency.
+    For example, the user can set up tempest to run with different concurrencies,
+    to be used with different regexes.
diff --git a/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
new file mode 100644
index 0000000..8e42e85
--- /dev/null
+++ b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added associate_floating_ip() and dissociate_floating_ip() methods
+    to the scenario manager.
diff --git a/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml b/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml
new file mode 100644
index 0000000..e53411d
--- /dev/null
+++ b/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Added the public interface create_loginable_secgroup_rule().
+    Since this interface is meant to be used by tempest plugins,
+    it doesn't necessarily need to be a private API.
diff --git a/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml b/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml
new file mode 100644
index 0000000..3354f65
--- /dev/null
+++ b/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Added the public interface create_security_group_rule().
+    Since this interface is meant to be used by tempest plugins,
+    it doesn't necessarily need to be a private API.
diff --git a/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml b/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml
new file mode 100644
index 0000000..4b31ff8
--- /dev/null
+++ b/releasenotes/notes/deprecate-and-enable-identity-application_credentials-1d4eaef4c3c9dcba.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+  - |
+    Application credentials are supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    application credentials testing
+    (``CONF.identity-feature-enabled.application_credentials``)
+    is now enabled by default.
+deprecations:
+  - |
+    Application credentials are supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    application credentials testing
+    (``CONF.identity-feature-enabled.application_credentials``)
+    is now deprecated.
diff --git a/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml b/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml
new file mode 100644
index 0000000..be2df6b
--- /dev/null
+++ b/releasenotes/notes/deprecate-and-enable-identity-project-tags-23b87518888e563a.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+  - |
+    Project tags are supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    project tags testing
+    (``CONF.identity-feature-enabled.project_tags``)
+    is now enabled by default.
+deprecations:
+  - |
+    Project tags are supported by Keystone since Queens.
+    As Tempest currently supports only much newer OpenStack versions
+    (Ussuri or later), the configuration option which enables
+    project tags testing
+    (``CONF.identity-feature-enabled.project_tags``)
+    is now deprecated.
diff --git a/releasenotes/notes/deprecate-image-v1-service-clients-d12ed42210bb76b5.yaml b/releasenotes/notes/deprecate-image-v1-service-clients-d12ed42210bb76b5.yaml
new file mode 100644
index 0000000..4a22f8e
--- /dev/null
+++ b/releasenotes/notes/deprecate-image-v1-service-clients-d12ed42210bb76b5.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+  - |
+    The Tempest service clients for the image v1 APIs (tempest.lib.services.image.v1 module)
+    are deprecated and will be removed once Tempest stops supporting the stable Ussuri
+    release, which is the last release in which the image v1 APIs are present in glance.
diff --git a/releasenotes/notes/deprecate-volume-v2-service-clients-ff8a2a7be1797eb5.yaml b/releasenotes/notes/deprecate-volume-v2-service-clients-ff8a2a7be1797eb5.yaml
new file mode 100644
index 0000000..de05679
--- /dev/null
+++ b/releasenotes/notes/deprecate-volume-v2-service-clients-ff8a2a7be1797eb5.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+  - |
+    The Tempest service clients for the volume v2 APIs (tempest.lib.services.volume.v2 module)
+    are deprecated and will be removed once Tempest stops supporting the stable Wallaby
+    release, which is the last release in which the volume v2 APIs are present in cinder.
diff --git a/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
new file mode 100644
index 0000000..fd7a874
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Wallaby development cycle to
+    mark the end of support for the EM Stein release in Tempest.
+    After this release, Tempest will support the below OpenStack releases:
+
+    * Victoria
+    * Ussuri
+    * Train
+
+    Current development of Tempest is for OpenStack Wallaby development
+    cycle.
diff --git a/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml b/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml
new file mode 100644
index 0000000..36681c7
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-train-83369468215d7485.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Xena development cycle to
+    mark the end of support for the EM Train release in Tempest.
+    After this release, Tempest will support the below OpenStack releases:
+
+    * Wallaby
+    * Victoria
+    * Ussuri
+
+    Current development of Tempest is for OpenStack Xena development
+    cycle.
diff --git a/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml b/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml
new file mode 100644
index 0000000..3aaec69
--- /dev/null
+++ b/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add a new client to list, create, show information for,
+    update and delete neutron floating IP port forwarding
+    resources.
diff --git a/releasenotes/notes/image-client-add-versions-and-tasks-ac289dbfe1c899cc.yaml b/releasenotes/notes/image-client-add-versions-and-tasks-ac289dbfe1c899cc.yaml
new file mode 100644
index 0000000..fde6193
--- /dev/null
+++ b/releasenotes/notes/image-client-add-versions-and-tasks-ac289dbfe1c899cc.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Adds a method to images_client to get the tasks relevant to a given image. Also adds
+    a has_version() method to the image versions_client to probe for availability of a given
+    API version.
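
A small hedged example of how the new ``has_version()`` probe might be used in a test; the ``self.versions_client`` attribute and the version string format are assumptions made for this sketch::

  # Skip an import-related test unless image API v2.6 is available.
  if not self.versions_client.has_version('2.6'):
      raise self.skipException('glance API v2.6 is not available')
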
diff --git a/releasenotes/notes/image_import_testing_support-22ba4bcb9f2fb848.yaml b/releasenotes/notes/image_import_testing_support-22ba4bcb9f2fb848.yaml
new file mode 100644
index 0000000..b0180cc
--- /dev/null
+++ b/releasenotes/notes/image_import_testing_support-22ba4bcb9f2fb848.yaml
@@ -0,0 +1,17 @@
+---
+features:
+  - |
+    Add glance image import API functions to the v2
+    images_client library.
+
+    * stage_image_file
+    * info_import
+    * info_stores
+    * image_import
+other:
+  - |
+    A new configuration option,
+    ``CONF.glance.image_feature_enabled.image_import``, has been introduced
+    to enable the image import tests. If your glance deployment supports the
+    image import functionality then you can enable the image import tests
+    via this flag. The default value of this new config option is false.
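
A sketch of the import flow these new calls enable is shown below; only the method names come from the note, while the argument names, the ``images_client`` instance, and the ``glance-direct`` import method value are assumptions for illustration::

  # Create an image record, stage the image bits, then trigger the import.
  image = images_client.create_image(name='import-example',
                                     container_format='bare',
                                     disk_format='qcow2')
  with open('/tmp/cirros.img', 'rb') as image_file:
      images_client.stage_image_file(image['id'], image_file)
  images_client.image_import(image['id'], method='glance-direct')
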
diff --git a/releasenotes/notes/intermediate-wallaby-release-55a0b31b1dee7b23.yaml b/releasenotes/notes/intermediate-wallaby-release-55a0b31b1dee7b23.yaml
new file mode 100644
index 0000000..cda3b89
--- /dev/null
+++ b/releasenotes/notes/intermediate-wallaby-release-55a0b31b1dee7b23.yaml
@@ -0,0 +1,4 @@
+---
+prelude: >
+    This is an intermediate release during the Wallaby development cycle to
+    make new functionality available to plugins and other consumers.
diff --git a/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml b/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml
new file mode 100644
index 0000000..a058137
--- /dev/null
+++ b/releasenotes/notes/introduce-attachments-client-add-show-attachment-api-c3111f7e560a87b3.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    A new attachments client library has been introduced for the volume
+    service.
+
+    Initially only the show_attachment API is provided. This API requires a
+    minimum volume API microversion of ``3.27``.
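For illustration only, a hypothetical fragment showing the single call the new
client initially exposes, assuming a tempest test context where
``self.attachments_client`` is the new volume attachments client and the
request microversion is already set to ``3.27`` or higher::

    # Hypothetical fragment; the response key is an assumption.
    attachment = self.attachments_client.show_attachment(
        attachment_id)['attachment']
    volume_id = attachment['volume_id']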
diff --git a/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml b/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml
new file mode 100644
index 0000000..311eca3
--- /dev/null
+++ b/releasenotes/notes/limits-client-d656f16a3d3e84fc.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Add a new client for keystone's unified limits API to create and update limits.
diff --git a/releasenotes/notes/log-resource-client-20e58a295f729902.yaml b/releasenotes/notes/log-resource-client-20e58a295f729902.yaml
new file mode 100644
index 0000000..405fc5f
--- /dev/null
+++ b/releasenotes/notes/log-resource-client-20e58a295f729902.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add a new client to list, create, show information for,
+    and update the neutron log resource.
diff --git a/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml b/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml
new file mode 100644
index 0000000..2779b26
--- /dev/null
+++ b/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Added the public interface log_console_output().
+    It used to be a private method named _log_console_output().
+    Since this interface is meant to be used by tempest plugins,
+    it does not necessarily need to be a private API.
+
diff --git a/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml b/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml
new file mode 100644
index 0000000..ac83eaf
--- /dev/null
+++ b/releasenotes/notes/loggable-resource-client-5977d46a7ea52199.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    A service client for listing neutron's loggable resources is available in
+    the ``tempest/lib/services/network/loggable_resource_client.py`` module.
\ No newline at end of file
diff --git a/releasenotes/notes/make-create-user-domain-aware-for-v3-creds-client-5054f58e715adc0c.yaml b/releasenotes/notes/make-create-user-domain-aware-for-v3-creds-client-5054f58e715adc0c.yaml
new file mode 100644
index 0000000..8931f09
--- /dev/null
+++ b/releasenotes/notes/make-create-user-domain-aware-for-v3-creds-client-5054f58e715adc0c.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+  - |
+    [`bug 1613819 <https://bugs.launchpad.net/tempest/+bug/1613819>`_]
+    admin_domain_name and default_credentials_domain_name parameters
+    under [auth] now affect a domain used for creating test users just
+    as they affect it for projects. Previously a domain with an id set
+    to "default" had to be present in order for test user creation to
+    succeed with Keystone v3.
diff --git a/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml b/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml
new file mode 100644
index 0000000..ff406fb
--- /dev/null
+++ b/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml
@@ -0,0 +1,6 @@
+---
+prelude: >
+    The integrated horizon dashboard test has been moved
+    from the tempest-horizon plugin into Tempest. You no longer need
+    to install tempest-horizon to run the horizon test, which
+    can be run using Tempest itself.
diff --git a/releasenotes/notes/network-swap-to-project_id-a1d7fdf6c5e1cf44.yaml b/releasenotes/notes/network-swap-to-project_id-a1d7fdf6c5e1cf44.yaml
new file mode 100644
index 0000000..4cf0c76
--- /dev/null
+++ b/releasenotes/notes/network-swap-to-project_id-a1d7fdf6c5e1cf44.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    [`blueprint adopt-oslo-versioned-objects-for-db <https://blueprints.launchpad.net/neutron/+spec/adopt-oslo-versioned-objects-for-db>`_]
+    Any reference to "tenant_id" in Network objects is replaced with
+    "project_id".
diff --git a/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml b/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml
new file mode 100644
index 0000000..1f2d6b9
--- /dev/null
+++ b/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    New config option in ``network-feature-enabled``: ``available_features``.
+    This is a list which can contain features that are not discoverable
+    through the Neutron API, or it can be the special entry ``all``.
diff --git a/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml b/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml
new file mode 100644
index 0000000..9e6d49a
--- /dev/null
+++ b/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    Add ``placement`` API methods for testing the Routed Provider Networks feature.
+    The following API calls are now available to tempest in the new
+    resource_providers_client:
+
+    * GET /resource_providers
+    * GET /resource_providers/{uuid}
+    * GET /resource_providers/{uuid}/inventories
+    * GET /resource_providers/{uuid}/aggregates
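A hypothetical sketch of the corresponding client calls, assuming a tempest
test context where ``self.resource_providers_client`` is the new placement
client; the Python method names below are inferred from the GET endpoints
listed above and may not match the actual client exactly::

    # Hypothetical fragment; method names inferred from the endpoints above.
    providers = self.resource_providers_client.list_resource_providers()
    for rp in providers['resource_providers']:
        self.resource_providers_client.show_resource_provider(rp['uuid'])
        self.resource_providers_client.list_resource_provider_inventories(
            rp['uuid'])
        self.resource_providers_client.list_resource_provider_aggregates(
            rp['uuid'])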
diff --git a/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml b/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml
new file mode 100644
index 0000000..42322e4
--- /dev/null
+++ b/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    The ``tempest.lib.common.utils.data_utils.random_bytes()`` helper
+    function will no longer allow a ``size`` of more than 1MiB. Tests
+    generally do not need to generate and use large payloads for
+    feature verification and it is easy to lose track of and duplicate
+    large buffers. The sum total of such errors can become problematic
+    in parallelized and constrained CI environments.
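For example, callers that previously generated multi-megabyte payloads should
now stay at or below the 1 MiB cap; the snippet below relies only on the
documented ``size`` argument::

    from tempest.lib.common.utils import data_utils

    small_payload = data_utils.random_bytes(1024)        # 1 KiB, fine
    max_payload = data_utils.random_bytes(1024 * 1024)   # exactly 1 MiB, fine
    # Anything larger, e.g. data_utils.random_bytes(2 * 1024 * 1024),
    # is no longer allowed by the helper.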
diff --git a/releasenotes/notes/remove-deprecated-old-token-clients-e4c2e654132f1130.yaml b/releasenotes/notes/remove-deprecated-old-token-clients-e4c2e654132f1130.yaml
new file mode 100644
index 0000000..9acb873
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-old-token-clients-e4c2e654132f1130.yaml
@@ -0,0 +1,9 @@
+---
+prelude: >
+    Tempest's identity service clients TokenClientJSON and V3TokenClientJSON
+    have been removed.
+upgrade:
+  - |
+    Tempest's identity service clients TokenClientJSON and V3TokenClientJSON
+    had been deprecated for a long time and are now removed. Please use the new
+    service clients TokenClient and V3TokenClient instead.
diff --git a/releasenotes/notes/remove-deprecated-volume-config-options-4b7ea93b88e5b982.yaml b/releasenotes/notes/remove-deprecated-volume-config-options-4b7ea93b88e5b982.yaml
new file mode 100644
index 0000000..f3002f9
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-volume-config-options-4b7ea93b88e5b982.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    Deprecated config options to select the Volume API version have been
+    removed. Use ``CONF.volume.catalog_type`` to run volume tests under v3
+    or v2 APIs.
+
+    * ``CONF.volume-feature-enabled.api_v2``
+    * ``CONF.volume-feature-enabled.api_v3``
\ No newline at end of file
diff --git a/releasenotes/notes/remove-old-data-utils-e0966f882f7fe23a.yaml b/releasenotes/notes/remove-old-data-utils-e0966f882f7fe23a.yaml
new file mode 100644
index 0000000..ac20340
--- /dev/null
+++ b/releasenotes/notes/remove-old-data-utils-e0966f882f7fe23a.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    The old deprecated ``data_utils`` from ``tempest.common.utils`` has been
+    removed. If you are still using this, use the stable version
+    of ``data_utils`` from the new location, ``tempest.lib.common.utils``.
diff --git a/releasenotes/notes/remove-volume-v1-service-clients-9235e3a965f93c09.yaml b/releasenotes/notes/remove-volume-v1-service-clients-9235e3a965f93c09.yaml
new file mode 100644
index 0000000..3c90f81
--- /dev/null
+++ b/releasenotes/notes/remove-volume-v1-service-clients-9235e3a965f93c09.yaml
@@ -0,0 +1,8 @@
+---
+prelude: >
+    Tempest service clients for the volume v1 APIs have been removed.
+upgrade:
+  - |
+    Cinder removed the volume v1 APIs in the Queens release and Tempest
+    now supports only stable Train onwards, so all the Tempest
+    service clients for the volume v1 APIs have now been removed.
diff --git a/releasenotes/notes/support-for-rbac-new-scope-6ec8164ce1e7288c.yaml b/releasenotes/notes/support-for-rbac-new-scope-6ec8164ce1e7288c.yaml
new file mode 100644
index 0000000..af7df93
--- /dev/null
+++ b/releasenotes/notes/support-for-rbac-new-scope-6ec8164ce1e7288c.yaml
@@ -0,0 +1,13 @@
+---
+prelude: >
+    Support for RBAC new system scope is added in Tempest.
+features:
+  - |
+    Keystone provides a new scope of token called ``system`` which
+    can be used to query system scoped API operations. Projects
+    are moving towards policies with the new scope types; Keystone and Nova
+    already provide the new policies for RBAC checks. Tempest has added
+    support for requesting a system scoped token from keystone to test
+    the new policies.
+    As a next step, we will move all the Tempest tests to the projects'
+    new policies.
diff --git a/releasenotes/notes/support-scope-in-get-roles-dynamic-creds-90bfab163c1c289a.yaml b/releasenotes/notes/support-scope-in-get-roles-dynamic-creds-90bfab163c1c289a.yaml
new file mode 100644
index 0000000..26282f0
--- /dev/null
+++ b/releasenotes/notes/support-scope-in-get-roles-dynamic-creds-90bfab163c1c289a.yaml
@@ -0,0 +1,36 @@
+---
+features:
+  - |
+    Dynamic credentials now support the scope type for specific roles
+    too, along with the ``admin``, ``member``, and ``reader`` roles.
+    A test can specify the scope in the prefix of the ``cls.credentials`` name.
+    If ``system`` is the prefix of the ``cls.credentials`` name, then creds will
+    be created with the ``system`` scope. If ``domain`` is the prefix of the
+    ``cls.credentials`` name, then creds will be created with the ``domain``
+    scope, otherwise the default ``project`` scope will be used.
+    For example::
+
+        credentials = [['my_role', 'role1'], # this will be old style and project scoped
+                       ['project_my_role', 'role1'], # this will be project scoped
+                       ['domain_my_role', 'role1'], # this will be domain scoped
+                       ['system_my_role', 'role1']] # this will be system scoped
+
+    And below is how a test can access the credential manager of the respective
+    credential type::
+
+        cls.os_my_role.any_client
+        cls.os_project_my_role.any_client
+        cls.os_domain_my_role.any_client
+        cls.os_system_my_role.any_client
+
+
+    For backward compatibility, we also set the credential manager class attribute
+    in the old style form, which is prefixed with ``os_roles_*``, for example
+    ``cls.os_roles_my_role``, but we recommend using the new style attribute
+    as shown above.
+issues:
+  - |
+    Scope support for specific roles is not yet added for pre-provisioned credentials.
+fixes:
+  - |
+    Fixes the `bug# 1917168 <https://bugs.launchpad.net/tempest/+bug/1917168>`_
diff --git a/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml b/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml
new file mode 100644
index 0000000..9e48510
--- /dev/null
+++ b/releasenotes/notes/swift-improve-cleanup-63cfc21ffb3aff6f.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Improve cleanup after Swift testing. Ensures containers are empty before
+    deleting to prevent errors due to delayed execution.
diff --git a/releasenotes/notes/system-scope-44244cc955a7825f.yaml b/releasenotes/notes/system-scope-44244cc955a7825f.yaml
new file mode 100644
index 0000000..969a71f
--- /dev/null
+++ b/releasenotes/notes/system-scope-44244cc955a7825f.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Adds new personas that can be used to test service policies for all
+    default scopes (project, domain, and system) and roles (reader, member,
+    and admin). Both dynamic credentials and pre-provisioned credentials are
+    supported.
diff --git a/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml b/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml
new file mode 100644
index 0000000..37e56bb
--- /dev/null
+++ b/releasenotes/notes/tempest-ussuri-release-72b5770a3b97678f.yaml
@@ -0,0 +1,16 @@
+---
+prelude: >
+    This release is to tag Tempest for the OpenStack Ussuri release.
+    This release marks the start of Ussuri release support in Tempest.
+    After this release, Tempest will support the following OpenStack releases:
+
+    * Ussuri
+    * Train
+    * Stein
+
+    Current development of Tempest is for the OpenStack Victoria development
+    cycle. Every Tempest commit is also tested against master during
+    the Victoria cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Ussuri (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Ussuri release.
diff --git a/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml b/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
new file mode 100644
index 0000000..574f6d9
--- /dev/null
+++ b/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
@@ -0,0 +1,17 @@
+---
+prelude: |
+    This release is to tag Tempest for the OpenStack Victoria release.
+    This release marks the start of Victoria release support in Tempest.
+    After this release, Tempest will support the following OpenStack releases:
+
+    * Victoria
+    * Ussuri
+    * Train
+    * Stein
+
+    Current development of Tempest is for the OpenStack Wallaby development
+    cycle. Every Tempest commit is also tested against master during
+    the Wallaby cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Victoria (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Victoria release.
diff --git a/releasenotes/notes/tempest-wallaby-release-0f2cea5ccf63485e.yaml b/releasenotes/notes/tempest-wallaby-release-0f2cea5ccf63485e.yaml
new file mode 100644
index 0000000..25fb1f2
--- /dev/null
+++ b/releasenotes/notes/tempest-wallaby-release-0f2cea5ccf63485e.yaml
@@ -0,0 +1,17 @@
+---
+prelude: |
+    This release is to tag Tempest for the OpenStack Wallaby release.
+    This release marks the start of Wallaby release support in Tempest.
+    After this release, Tempest will support the following OpenStack releases:
+
+    * Wallaby
+    * Victoria
+    * Ussuri
+    * Train
+
+    Current development of Tempest is for the OpenStack Xena development
+    cycle. Every Tempest commit is also tested against master during
+    the Xena cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Wallaby (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Wallaby release.
diff --git a/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml b/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml
new file mode 100644
index 0000000..218d8ca
--- /dev/null
+++ b/releasenotes/notes/tempest-xena-release-3ffb30eb59e49f2c.yaml
@@ -0,0 +1,18 @@
+---
+prelude: |
+    This release is to tag Tempest for the OpenStack Xena release.
+    This release marks the start of Xena release support in Tempest.
+    After this release, Tempest will support the following OpenStack releases:
+
+    * Xena
+    * Wallaby
+    * Victoria
+    * Ussuri
+
+    Current development of Tempest is for the OpenStack Yoga development
+    cycle. Every Tempest commit is also tested against master during
+    the Yoga cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Xena (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Xena release.
+
diff --git a/releasenotes/notes/xenapi_apis-conf-fcca549283e53ed6.yaml b/releasenotes/notes/xenapi_apis-conf-fcca549283e53ed6.yaml
new file mode 100644
index 0000000..4d26210
--- /dev/null
+++ b/releasenotes/notes/xenapi_apis-conf-fcca549283e53ed6.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+  - |
+    A number of Compute APIs that only worked with the XenAPI virt driver have
+    been removed in the Compute service. As a result, their corresponding tests
+    are now disabled by default. They can be re-enabled using the new
+    ``[compute_feature_enabled] xenapi_apis`` config option.
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 92df4c4..b353a18 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -42,9 +42,9 @@
 ]
 
 # openstackdocstheme options
-repository_name = 'openstack/tempest'
-bug_project = 'tempest'
-bug_tag = ''
+openstackdocs_repo_name = 'openstack/tempest'
+openstackdocs_bug_project = 'tempest'
+openstackdocs_bug_tag = ''
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -59,7 +59,7 @@
 master_doc = 'index'
 
 # General information about the project.
-copyright = u'2016, tempest Developers'
+copyright = '2016, tempest Developers'
 
 # Release do not need a version number in the title, they
 # cover multiple versions.
@@ -98,7 +98,7 @@
 # show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
 
 # A list of ignored prefixes for module index sorting.
 # modindex_common_prefix = []
@@ -194,8 +194,8 @@
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
     ('index', 'olso.configReleaseNotes.tex',
-     u'olso.config Release Notes Documentation',
-     u'tempest Developers', 'manual'),
+     'olso.config Release Notes Documentation',
+     'tempest Developers', 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
@@ -225,8 +225,8 @@
 # (source start file, name, description, authors, manual section).
 man_pages = [
     ('index', 'olso.configreleasenotes',
-     u'tempest Release Notes Documentation',
-     [u'tempest Developers'], 1)
+     'tempest Release Notes Documentation',
+     ['tempest Developers'], 1)
 ]
 
 # If true, show URL addresses after external links.
@@ -240,8 +240,8 @@
 #  dir menu entry, description, category)
 texinfo_documents = [
     ('index', 'tempestReleaseNotes',
-     u'tempest Release Notes Documentation',
-     u'tempest Developers', 'olso.configReleaseNotes',
+     'tempest Release Notes Documentation',
+     'tempest Developers', 'olso.configReleaseNotes',
      'An OpenStack library for parsing configuration options from the command'
      ' line and configuration files.',
      'Miscellaneous'),
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index bfd8b2d..6a1f8b4 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,13 @@
    :maxdepth: 1
 
    unreleased
+   v29.0.0
+   v28.1.0
+   v28.0.0
+   v27.0.0
+   v26.1.0
+   v26.0.0
+   v24.0.0
    v23.0.0
    v22.1.0
    v22.0.0
diff --git a/releasenotes/source/v24.0.0.rst b/releasenotes/source/v24.0.0.rst
new file mode 100644
index 0000000..8131975
--- /dev/null
+++ b/releasenotes/source/v24.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v24.0.0 Release Notes
+=====================
+
+.. release-notes:: 24.0.0 Release Notes
+   :version: 24.0.0
diff --git a/releasenotes/source/v26.0.0.rst b/releasenotes/source/v26.0.0.rst
new file mode 100644
index 0000000..4161f89
--- /dev/null
+++ b/releasenotes/source/v26.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v26.0.0 Release Notes
+=====================
+.. release-notes:: 26.0.0 Release Notes
+   :version: 26.0.0
diff --git a/releasenotes/source/v26.1.0.rst b/releasenotes/source/v26.1.0.rst
new file mode 100644
index 0000000..554cf1f
--- /dev/null
+++ b/releasenotes/source/v26.1.0.rst
@@ -0,0 +1,5 @@
+=====================
+v26.1.0 Release Notes
+=====================
+.. release-notes:: 26.1.0 Release Notes
+   :version: 26.1.0
diff --git a/releasenotes/source/v27.0.0.rst b/releasenotes/source/v27.0.0.rst
new file mode 100644
index 0000000..0009124
--- /dev/null
+++ b/releasenotes/source/v27.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v27.0.0 Release Notes
+=====================
+.. release-notes:: 27.0.0 Release Notes
+   :version: 27.0.0
diff --git a/releasenotes/source/v28.0.0.rst b/releasenotes/source/v28.0.0.rst
new file mode 100644
index 0000000..19d4218
--- /dev/null
+++ b/releasenotes/source/v28.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v28.0.0 Release Notes
+=====================
+.. release-notes:: 28.0.0 Release Notes
+   :version: 28.0.0
diff --git a/releasenotes/source/v28.1.0.rst b/releasenotes/source/v28.1.0.rst
new file mode 100644
index 0000000..3cc3478
--- /dev/null
+++ b/releasenotes/source/v28.1.0.rst
@@ -0,0 +1,5 @@
+=====================
+v28.1.0 Release Notes
+=====================
+.. release-notes:: 28.1.0 Release Notes
+   :version: 28.1.0
diff --git a/releasenotes/source/v29.0.0.rst b/releasenotes/source/v29.0.0.rst
new file mode 100644
index 0000000..d367a59
--- /dev/null
+++ b/releasenotes/source/v29.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v29.0.0 Release Notes
+=====================
+.. release-notes:: 29.0.0 Release Notes
+   :version: 29.0.0
diff --git a/requirements.txt b/requirements.txt
index bf38fae..c71cabe 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,22 +3,21 @@
 # process, which may cause wedges in the gate later.
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 cliff!=2.9.0,>=2.8.0 # Apache-2.0
-jsonschema>=2.6.0 # MIT
+jsonschema>=3.2.0 # MIT
 testtools>=2.2.0 # MIT
-paramiko>=2.0.0 # LGPLv2.1+
+paramiko>=2.7.0 # LGPLv2.1+
 netaddr>=0.7.18 # BSD
 oslo.concurrency>=3.26.0 # Apache-2.0
 oslo.config>=5.2.0 # Apache-2.0
 oslo.log>=3.36.0 # Apache-2.0
 stestr>=1.0.0 # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-oslo.utils>=3.33.0 # Apache-2.0
-six>=1.10.0 # MIT
+oslo.utils>=4.7.0 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD
 PyYAML>=3.12 # MIT
 python-subunit>=1.0.0 # Apache-2.0/BSD
 stevedore>=1.20.0 # Apache-2.0
-PrettyTable<0.8,>=0.7.1 # BSD
+PrettyTable>=0.7.1 # BSD
 urllib3>=1.21.1 # MIT
 debtcollector>=1.2.0 # Apache-2.0
 unittest2>=1.1.0 # BSD
diff --git a/roles/ipv6-only-deployments-verification/README.rst b/roles/ipv6-only-deployments-verification/README.rst
deleted file mode 100644
index 400a8da..0000000
--- a/roles/ipv6-only-deployments-verification/README.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-Verify the IPv6-only deployments
-
-This role needs to be invoked from a playbook that
-run tests. This role verifies the IPv6 setting on
-devstack side and devstack deploy services on IPv6.
-This role is invoked before tests are run so that
-if any missing IPv6 setting or deployments can fail
-the job early.
-
-
-**Role Variables**
-
-.. zuul:rolevar:: devstack_base_dir
-   :default: /opt/stack
-
-   The devstack base directory.
diff --git a/roles/ipv6-only-deployments-verification/defaults/main.yaml b/roles/ipv6-only-deployments-verification/defaults/main.yaml
deleted file mode 100644
index fea05c8..0000000
--- a/roles/ipv6-only-deployments-verification/defaults/main.yaml
+++ /dev/null
@@ -1 +0,0 @@
-devstack_base_dir: /opt/stack
diff --git a/roles/ipv6-only-deployments-verification/tasks/main.yaml b/roles/ipv6-only-deployments-verification/tasks/main.yaml
deleted file mode 100644
index d73c79c..0000000
--- a/roles/ipv6-only-deployments-verification/tasks/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-- name: Verify the ipv6-only deployments
-  become: true
-  become_user: stack
-  shell: "{{ devstack_base_dir }}/tempest/tools/verify-ipv6-only-deployments.sh"
diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst
deleted file mode 100644
index a8447d2..0000000
--- a/roles/process-stackviz/README.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-Generate stackviz report.
-
-Generate stackviz report using subunit and dstat data, using
-the stackviz archive embedded in test images.
-
-**Role Variables**
-
-.. zuul:rolevar:: devstack_base_dir
-   :default: /opt/stack
-
-   The devstack base directory.
-
-.. zuul:rolevar:: stage_dir
-   :default: "{{ ansible_user_dir }}"
-
-   The stage directory where the input data can be found and
-   the output will be produced.
-
-.. zuul:rolevar:: zuul_work_dir
-   :default: {{ devstack_base_dir }}/tempest
-
-   Directory to work in. It has to be a fully qualified path.
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml
deleted file mode 100644
index f3bc32b..0000000
--- a/roles/process-stackviz/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-devstack_base_dir: /opt/stack
-stage_dir: "{{ ansible_user_dir }}"
-zuul_work_dir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
deleted file mode 100644
index 3724e0e..0000000
--- a/roles/process-stackviz/tasks/main.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-- name: Check if stackviz archive exists
-  stat:
-    path: "/opt/cache/files/stackviz-latest.tar.gz"
-  register: stackviz_archive
-
-- debug:
-    msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
-  when: not stackviz_archive.stat.exists
-
-- name: Check if subunit data exists
-  stat:
-    path: "{{ zuul_work_dir }}/testrepository.subunit"
-  register: subunit_input
-
-- debug:
-    msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit"
-  when: not subunit_input.stat.exists
-
-- name: Install stackviz
-  pip:
-    name: "file://{{ stackviz_archive.stat.path }}"
-    virtualenv: /tmp/stackviz
-    extra_args: -U
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-
-- name: Deploy stackviz static html+js
-  command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-
-- name: Check if dstat data exists
-  stat:
-    path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
-  register: dstat_input
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-
-- name: Run stackviz with dstat
-  shell: |
-    cat {{ subunit_input.stat.path }} | \
-      /tmp/stackviz/bin/stackviz-export \
-        --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
-        --env --stdin \
-        {{ stage_dir }}/stackviz/data
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-    - dstat_input.stat.exists
-  failed_when: False
-
-- name: Run stackviz without dstat
-  shell: |
-    cat {{ subunit_input.stat.path }} | \
-      /tmp/stackviz/bin/stackviz-export \
-        --env --stdin \
-        {{ stage_dir }}/stackviz/data
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-    - not dstat_input.stat.exists
-  failed_when: False
diff --git a/roles/run-tempest-26/README.rst b/roles/run-tempest-26/README.rst
new file mode 100644
index 0000000..3643edb
--- /dev/null
+++ b/roles/run-tempest-26/README.rst
@@ -0,0 +1,83 @@
+Run Tempest
+
+The result of the tempest run is stored in the `tempest_run_result`
+variable (through the `register` statement).
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: tempest_concurrency
+   :default: 0
+
+   The number of parallel test processes.
+
+.. zuul:rolevar:: tempest_test_regex
+   :default: ''
+
+   A regular expression used to select the tests.
+
+   It works only when used with some specific tox environments
+   ('all', 'all-plugin').
+
+   In the following example, only API, scenario and third party tests
+   will be executed.
+
+       ::
+           vars:
+             tempest_test_regex: (tempest\.(api|scenario|thirdparty)).*$
+
+.. zuul:rolevar:: tempest_test_blacklist
+
+   Specifies a blacklist file to skip tests that are not needed.
+
+   Pass a full path to the file.
+
+.. zuul:rolevar:: tox_envlist
+   :default: smoke
+
+   The Tempest tox environment to run.
+
+.. zuul:rolevar:: tempest_black_regex
+   :default: ''
+
+   A regular expression used to skip the tests.
+
+   It works only when used with some specific tox environments
+   ('all', 'all-plugin').
+
+       ::
+           vars:
+             tempest_black_regex: (tempest.api.identity).*$
+
+.. zuul:rolevar:: tox_extra_args
+   :default: ''
+
+   String of extra command line options to pass to tox.
+
+   Here is an example of running tox with --sitepackages option:
+
+       ::
+           vars:
+             tox_extra_args: --sitepackages
+
+.. zuul:rolevar:: tempest_test_timeout
+   :default: ''
+
+   The timeout (in seconds) for each test.
+
+.. zuul:rolevar:: stable_constraints_file
+   :default: ''
+
+   Upper constraints file to be used for stable branches till stable/stein.
+
+.. zuul:rolevar:: tempest_tox_environment
+   :default: ''
+
+   Environment variables to set for the run-tempest task.
+
+   Env variables set in this variable will be combined with some more
+   default env variables set at runtime.
diff --git a/roles/run-tempest-26/defaults/main.yaml b/roles/run-tempest-26/defaults/main.yaml
new file mode 100644
index 0000000..cbac76d
--- /dev/null
+++ b/roles/run-tempest-26/defaults/main.yaml
@@ -0,0 +1,12 @@
+devstack_base_dir: /opt/stack
+tempest_test_regex: ''
+tox_envlist: smoke
+tempest_black_regex: ''
+tox_extra_args: ''
+tempest_test_timeout: ''
+stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
+target_branch: "{{ zuul.branch }}"
+tempest_tox_environment: {}
+# NOTE(gmann): external_bridge_mtu shows as undefined for run-tempest role
+# defining default value here to avoid that error.
+external_bridge_mtu: 0
\ No newline at end of file
diff --git a/roles/run-tempest-26/tasks/main.yaml b/roles/run-tempest-26/tasks/main.yaml
new file mode 100644
index 0000000..f846006
--- /dev/null
+++ b/roles/run-tempest-26/tasks/main.yaml
@@ -0,0 +1,73 @@
+# NOTE(andreaf) The number of vcpus is not available on all systems.
+# See https://github.com/ansible/ansible/issues/30688
+# When not available, we fall back to ansible_processor_cores
+- name: Get hw.logicalcpu from sysctl
+  shell: sysctl hw.logicalcpu | cut -d' ' -f2
+  register: sysctl_hw_logicalcpu
+  when: ansible_processor_vcpus is not defined
+
+- name: Number of cores
+  set_fact:
+    num_cores: "{{ansible_processor_vcpus|default(sysctl_hw_logicalcpu.stdout)}}"
+
+- name: Set concurrency for cores == 3 or less
+  set_fact:
+    default_concurrency: "{{ num_cores }}"
+  when: num_cores|int <= 3
+
+- name: Limit max concurrency when more than 3 vcpus are available
+  set_fact:
+    default_concurrency: "{{ num_cores|int // 2 }}"
+  when: num_cores|int > 3
+
+- name: Override target branch
+  set_fact:
+    target_branch: "{{ zuul.override_checkout }}"
+  when: zuul.override_checkout is defined
+
+- name: Use stable branch upper-constraints till stable/stein
+  set_fact:
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+- name: Use Configured upper-constraints for non-master Tempest
+  set_fact:
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) | combine({'TOX_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) }}"
+  when:
+    - devstack_localrc is defined
+    - "'TEMPEST_BRANCH' in devstack_localrc"
+    - "'TEMPEST_VENV_UPPER_CONSTRAINTS' in devstack_localrc"
+    - devstack_localrc['TEMPEST_BRANCH'] != 'master'
+    - devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS'] != 'default'
+
+- name: Set OS_TEST_TIMEOUT if requested
+  set_fact:
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
+  when: tempest_test_timeout != ''
+
+- when:
+    - tempest_test_blacklist is defined
+  block:
+    - name: Check for test blacklist file
+      stat:
+        path: "{{ tempest_test_blacklist }}"
+      register:
+        blacklist_stat
+
+    - name: Build blacklist option
+      set_fact:
+        blacklist_option: "--blacklist-file={{ tempest_test_blacklist|quote }}"
+      when: blacklist_stat.stat.exists
+
+- name: Run Tempest
+  command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+            --concurrency={{tempest_concurrency|default(default_concurrency)}} \
+            --black-regex={{tempest_black_regex|quote}}
+  args:
+    chdir: "{{devstack_base_dir}}/tempest"
+  register: tempest_run_result
+  become: true
+  become_user: tempest
+  environment: "{{ tempest_tox_environment }}"
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 1f7fb70..0c72b69 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -1,5 +1,8 @@
 Run Tempest
 
+The result of the tempest run is stored in the `tempest_run_result`
+variable (through the `register` statement).
+
 **Role Variables**
 
 .. zuul:rolevar:: devstack_base_dir
@@ -20,18 +23,20 @@
    It works only when used with some specific tox environments
    ('all', 'all-plugin'.)
 
-   Multi-line and commented regexs can be achieved by doing this:
+   In the following example, only API, scenario and third party tests
+   will be executed.
 
        ::
            vars:
-             tempest_test_regex: |
-               (?x)    # Ignore comments and whitespaces
-               # Line with only a comment.
-               (tempest\.(api|scenario|thirdparty)).*$    # Run only api scenario and third party
+             tempest_test_regex: (tempest\.(api|scenario|thirdparty)).*$
 
 .. zuul:rolevar:: tempest_test_blacklist
 
-   Specifies a blacklist file to skip tests that are not needed.
+   DEPRECATED option, please use tempest_test_exclude_list instead.
+
+.. zuul:rolevar:: tempest_test_exclude_list
+
+   Specifies an excludelist file to skip tests that are not needed.
 
    Pass a full path to the file.
 
@@ -43,19 +48,19 @@
 .. zuul:rolevar:: tempest_black_regex
    :default: ''
 
+   DEPRECATED option, please use tempest_exclude_regex instead.
+
+.. zuul:rolevar:: tempest_exclude_regex
+   :default: ''
+
    A regular expression used to skip the tests.
 
    It works only when used with some specific tox environments
    ('all', 'all-plugin'.)
 
-   Multi-line and commented regexs can be achieved by doing this:
-
        ::
            vars:
-             tempest_black_regex: |
-               (?x)    # Ignore comments and whitespaces
-               # Line with only a comment.
-               (tempest.api.identity).*$
+             tempest_exclude_regex: (tempest.api.identity).*$
 
 .. zuul:rolevar:: tox_extra_args
    :default: ''
@@ -76,7 +81,7 @@
 .. zuul:rolevar:: stable_constraints_file
    :default: ''
 
-   Upper constraints file to be used for stable branch till stable/rocky.
+   Upper constraints file to be used for stable branch till stable/stein.
 
 .. zuul:rolevar:: tempest_tox_environment
    :default: ''
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index 5867b6c..5f13883 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -1,9 +1,11 @@
 devstack_base_dir: /opt/stack
 tempest_test_regex: ''
 tox_envlist: smoke
-tempest_black_regex: ''
 tox_extra_args: ''
 tempest_test_timeout: ''
 stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
 target_branch: "{{ zuul.branch }}"
 tempest_tox_environment: {}
+# NOTE(gmann): external_bridge_mtu shows as undefined for run-tempest role
+# defining default value here to avoid that error.
+external_bridge_mtu: 0
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 8686f9a..a8b3ede 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,16 +25,31 @@
     target_branch: "{{ zuul.override_checkout }}"
   when: zuul.override_checkout is defined
 
-- name: Use stable branch upper-constraints till stable/rocky
+- name: Use stable branch upper-constraints till stable/stein
   set_fact:
-    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) }}"
-  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky"]
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+- name: Use Configured upper-constraints for non-master Tempest
+  set_fact:
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) | combine({'TOX_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) }}"
+  when:
+    - devstack_localrc is defined
+    - "'TEMPEST_BRANCH' in devstack_localrc"
+    - "'TEMPEST_VENV_UPPER_CONSTRAINTS' in devstack_localrc"
+    - devstack_localrc['TEMPEST_BRANCH'] != 'master'
+    - devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS'] != 'master'
 
 - name: Set OS_TEST_TIMEOUT if requested
   set_fact:
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
   when: tempest_test_timeout != ''
 
+# TODO(kopecmartin) remove the following 'when block' after all consumers of
+# the role have switched to tempest_test_exclude_list option, until then it's
+# kept here for backward compatibility
 - when:
     - tempest_test_blacklist is defined
   block:
@@ -49,12 +64,45 @@
         blacklist_option: "--blacklist-file={{ tempest_test_blacklist|quote }}"
       when: blacklist_stat.stat.exists
 
+- when:
+    - tempest_test_exclude_list is defined
+  block:
+    - name: Check for test exclude list file
+      stat:
+        path: "{{ tempest_test_exclude_list }}"
+      register:
+        exclude_list_stat
+
+    - name: Build exclude list option
+      set_fact:
+        exclude_list_option: "--exclude-list={{ tempest_test_exclude_list|quote }}"
+      when: exclude_list_stat.stat.exists
+
+# TODO(kopecmartin) remove this after all consumers of the role have switched
+# to tempest_exclude_regex option, until then it's kept here for the backward
+# compatibility
+- name: Build exclude regex (old param)
+  set_fact:
+    tempest_test_exclude_regex: "--black-regex={{tempest_black_regex|quote}}"
+  when:
+    - tempest_black_regex is defined
+    - tempest_exclude_regex is not defined
+
+- name: Build exclude regex (new param)
+  set_fact:
+    tempest_test_exclude_regex: "--exclude-regex={{tempest_exclude_regex|quote}}"
+  when:
+    - tempest_black_regex is not defined
+    - tempest_exclude_regex is defined
+
 - name: Run Tempest
-  command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+  command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} \
+           {{blacklist_option|default('')}}  {{exclude_list_option|default('')}} \
             --concurrency={{tempest_concurrency|default(default_concurrency)}} \
-            --black-regex={{tempest_black_regex|quote}}
+           {{tempest_test_exclude_regex|default('')}}
   args:
     chdir: "{{devstack_base_dir}}/tempest"
+  register: tempest_run_result
   become: true
   become_user: tempest
   environment: "{{ tempest_tox_environment }}"
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
new file mode 100644
index 0000000..d1fad90
--- /dev/null
+++ b/roles/tempest-cleanup/README.rst
@@ -0,0 +1,61 @@
+Tempest cleanup
+===============
+
+Documentation regarding tempest cleanup can be found at the following
+link:
+https://docs.openstack.org/tempest/latest/cleanup.html
+
+When the init_saved_state and dry_run variables are set to false, the role
+will run tempest cleanup, which deletes resources not present in the
+saved_state.json file.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: init_saved_state
+   :default: false
+
+   When true, tempest cleanup --init-saved-state will be executed which
+   initializes the saved state of the OpenStack deployment and will output
+   a saved_state.json file containing resources from the deployment that will
+   be preserved from the cleanup command. This should be done prior to running
+   Tempest tests.
+
+.. zuul:rolevar:: dry_run
+   :default: false
+
+   When true, tempest cleanup creates a report (./dry_run.json) of the
+   resources that would be cleaned up if the role were run with the dry_run
+   option set to false.
+
+.. zuul:rolevar:: run_tempest_fail_if_leaked_resources
+   :default: false
+
+   When true, the role will fail if any leaked resources are detected.
+   The detection is done via the dry_run.json file; if it contains any
+   resources, some must have been leaked. This can also be used to verify that
+   tempest cleanup was successful.
+
+
+Role usage
+----------
+
+The role can also be used to verify that tempest tests don't leak any
+resources, or to test that the 'tempest cleanup' command deleted all leaked
+resources as expected.
+Either way, the role needs to be run first with the init_saved_state variable
+set to true before any tempest tests are executed.
+Then, after the tempest tests have run, this role needs to be run again with
+the role variables set according to the desired outcome:
+
+1. to verify that tempest tests don't leak any resources
+   run_tempest_dry_cleanup and run_tempest_fail_if_leaked_resources have to
+   be set to true.
+
+2. to check that 'tempest cleanup' command deleted all the leaked resources
+   run_tempest_cleanup and run_tempest_fail_if_leaked_resources have to be set
+   to true.
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
new file mode 100644
index 0000000..ce78bdb
--- /dev/null
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -0,0 +1,4 @@
+devstack_base_dir: /opt/stack
+init_saved_state: false
+dry_run: false
+run_tempest_fail_if_leaked_resources: false
diff --git a/roles/tempest-cleanup/tasks/dry_run.yaml b/roles/tempest-cleanup/tasks/dry_run.yaml
new file mode 100644
index 0000000..46749ab
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/dry_run.yaml
@@ -0,0 +1,7 @@
+---
+- name: Run tempest cleanup dry-run
+  become: yes
+  become_user: tempest
+  command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
+  args:
+    chdir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/tempest-cleanup/tasks/dry_run_checker.py b/roles/tempest-cleanup/tasks/dry_run_checker.py
new file mode 100644
index 0000000..9cd9a85
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/dry_run_checker.py
@@ -0,0 +1,71 @@
+# Copyright 2020 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility for content checking of a given dry_run.json file.
+"""
+
+import argparse
+import json
+import sys
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(__doc__)
+    parser.add_argument('--is-empty', action="store_true", dest='is_empty',
+                        default=False,
+                        help="""Are values of a given dry_run.json empty?""")
+    parser.add_argument('--file', dest='file', default=None, metavar='PATH',
+                        help="A path to a dry_run.json file.")
+    return parser
+
+
+def parse_arguments():
+    parser = get_parser()
+    args = parser.parse_args()
+    if not args.file:
+        sys.stderr.write('Path to a dry_run.json must be specified.\n')
+        sys.exit(1)
+    return args
+
+
+def load_json(path):
+    """Load json content from file addressed by path."""
+    try:
+        with open(path, 'rb') as json_file:
+            json_data = json.load(json_file)
+    except Exception as ex:
+        sys.exit(ex)
+    return json_data
+
+
+def are_values_empty(dry_run_content):
+    """Return true if values of dry_run.json are empty."""
+    for value in dry_run_content.values():
+        if value:
+            return False
+    return True
+
+
+def main():
+    args = parse_arguments()
+    content = load_json(args.file)
+    if args.is_empty:
+        if not are_values_empty(content):
+            sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
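The script above is driven from the cleanup tasks through its command line
interface; purely as an illustration of its core check, ``are_values_empty()``
behaves like this on in-memory data (the resource ids below are made up)::

    # Assumes are_values_empty() from dry_run_checker.py above is in scope,
    # e.g. via "from dry_run_checker import are_values_empty".
    leaked = {'servers': ['example-server-id'], 'volumes': []}
    clean = {'servers': [], 'volumes': []}

    assert are_values_empty(clean) is True       # nothing leaked
    assert are_values_empty(leaked) is False     # leaked resources detected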
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
new file mode 100644
index 0000000..c1d63f0
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -0,0 +1,46 @@
+- when: init_saved_state
+  block:
+    - name: Run tempest cleanup init-saved-state
+      become: yes
+      become_user: tempest
+      command: tox -evenv-tempest -- tempest cleanup --init-saved-state --debug
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
+
+    - name: Cat saved_state.json
+      command: cat "{{ devstack_base_dir }}/tempest/saved_state.json"
+
+- when: dry_run
+  block:
+    - import_tasks: dry_run.yaml
+
+    - name: Cat dry_run.json
+      command: cat "{{ devstack_base_dir }}/tempest/dry_run.json"
+
+- when:
+    - not dry_run
+    - not init_saved_state
+  block:
+    - name: Run tempest cleanup
+      become: yes
+      become_user: tempest
+      command: tox -evenv-tempest -- tempest cleanup --debug
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
+
+- when:
+    - run_tempest_fail_if_leaked_resources
+    - not init_saved_state
+  block:
+    # let's run dry run again, if haven't already, to check no leftover
+    # resources were left behind after the cleanup in the previous task
+    - import_tasks: dry_run.yaml
+      when: not dry_run
+
+    - name: Fail if any resources are leaked
+      become: yes
+      become_user: tempest
+      shell: |
+        python3 roles/tempest-cleanup/tasks/dry_run_checker.py --file {{ devstack_base_dir }}/tempest/dry_run.json --is-empty
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
diff --git a/setup.cfg b/setup.cfg
index d246c68..a41eccf 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,12 +1,12 @@
 [metadata]
 name = tempest
 summary = OpenStack Integration Testing
-description-file =
+description_file =
     README.rst
 author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = https://docs.openstack.org/tempest/latest/
-requires-python = >=3.6
+author_email = openstack-discuss@lists.openstack.org
+home_page = https://docs.openstack.org/tempest/latest/
+python_requires = >=3.6
 classifier =
     Intended Audience :: Information Technology
     Intended Audience :: System Administrators
@@ -17,6 +17,8 @@
     Programming Language :: Python :: 3
     Programming Language :: Python :: 3.6
     Programming Language :: Python :: 3.7
+    Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3.9
     Programming Language :: Python :: 3 :: Only
     Programming Language :: Python :: Implementation :: CPython
 
@@ -28,7 +30,6 @@
 
 [entry_points]
 console_scripts =
-    tempest-account-generator = tempest.cmd.account_generator:main
     tempest = tempest.cmd.main:main
     skip-tracker = tempest.lib.cmd.skip_tracker:main
     check-uuid = tempest.lib.cmd.check_uuid:run
@@ -48,6 +49,3 @@
     run = tempest.cmd.run:TempestRun
 oslo.config.opts =
     tempest.config = tempest.config:list_opts
-
-[wheel]
-universal = 1
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index 0901374..4cc5fdd 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -13,12 +13,22 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
+CONF = config.CONF
 
+
+# TODO(stephenfin): Remove these tests once the nova Ussuri branch goes EOL
 class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests Agents API"""
+    """Tests Compute Agents API"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(AgentsAdminTestJSON, cls).skip_checks()
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise cls.skipException('The os-agents API is not supported.')
 
     @classmethod
     def setup_clients(cls):
@@ -46,7 +56,7 @@
 
     @decorators.idempotent_id('1fc6bdc8-0b6d-4cc7-9f30-9b04fabe5b90')
     def test_create_agent(self):
-        # Create an agent.
+        """Test creating a compute agent"""
         params = self._param_helper(
             hypervisor='kvm', os='win', architecture='x86',
             version='7.0', url='xxx://xxxx/xxx/xxx',
@@ -58,6 +68,7 @@
 
     @decorators.idempotent_id('dc9ffd51-1c50-4f0e-a820-ae6d2a568a9e')
     def test_update_agent(self):
+        """Test updating a compute agent"""
         # Create and update an agent.
         body = self.client.create_agent(**self.params_agent)['agent']
         self.addCleanup(self.client.delete_agent, body['agent_id'])
@@ -71,7 +82,7 @@
 
     @decorators.idempotent_id('470e0b89-386f-407b-91fd-819737d0b335')
     def test_delete_agent(self):
-        # Create an agent and delete it.
+        """Test deleting a compute agent"""
         body = self.client.create_agent(**self.params_agent)['agent']
         self.client.delete_agent(body['agent_id'])
 
@@ -82,7 +93,7 @@
 
     @decorators.idempotent_id('6a326c69-654b-438a-80a3-34bcc454e138')
     def test_list_agents(self):
-        # Create an agent and  list all agents.
+        """Test listing compute agents"""
         body = self.client.create_agent(**self.params_agent)['agent']
         self.addCleanup(self.client.delete_agent, body['agent_id'])
         agents = self.client.list_agents()['agents']
@@ -91,7 +102,7 @@
 
     @decorators.idempotent_id('eabadde4-3cd7-4ec4-a4b5-5a936d2d4408')
     def test_list_agents_with_filter(self):
-        # Create agents and list the agent builds by the filter.
+        """Test listing compute agents by the filter"""
         body = self.client.create_agent(**self.params_agent)['agent']
         self.addCleanup(self.client.delete_agent, body['agent_id'])
         params = self._param_helper(
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 7a3bfdf..2716259 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -71,10 +71,11 @@
 
 
 class AggregatesAdminTestJSON(AggregatesAdminTestBase):
+    """Tests Aggregates API that require admin privileges"""
 
     @decorators.idempotent_id('0d148aa3-d54c-4317-aa8d-42040a475e20')
     def test_aggregate_create_delete(self):
-        # Create and delete an aggregate.
+        """Test create/delete aggregate"""
         aggregate = self._create_test_aggregate()
         self.assertIsNone(aggregate['availability_zone'])
 
@@ -83,7 +84,7 @@
 
     @decorators.idempotent_id('5873a6f8-671a-43ff-8838-7ce430bb6d0b')
     def test_aggregate_create_delete_with_az(self):
-        # Create and delete an aggregate.
+        """Test create/delete aggregate with availability_zone"""
         az_name = data_utils.rand_name(self.az_name_prefix)
         aggregate = self._create_test_aggregate(availability_zone=az_name)
         self.assertEqual(az_name, aggregate['availability_zone'])
@@ -93,7 +94,7 @@
 
     @decorators.idempotent_id('68089c38-04b1-4758-bdf0-cf0daec4defd')
     def test_aggregate_create_verify_entry_in_list(self):
-        # Create an aggregate and ensure it is listed.
+        """Test listing aggregate should contain the created aggregate"""
         aggregate = self._create_test_aggregate()
         aggregates = self.client.list_aggregates()['aggregates']
         self.assertIn((aggregate['id'], aggregate['availability_zone']),
@@ -102,7 +103,7 @@
 
     @decorators.idempotent_id('36ec92ca-7a73-43bc-b920-7531809e8540')
     def test_aggregate_create_update_metadata_get_details(self):
-        # Create an aggregate and ensure its details are returned.
+        """Test set/get aggregate metadata"""
         aggregate = self._create_test_aggregate()
         body = self.client.show_aggregate(aggregate['id'])['aggregate']
         self.assertEqual(aggregate['name'], body['name'])
@@ -121,7 +122,7 @@
 
     @decorators.idempotent_id('4d2b2004-40fa-40a1-aab2-66f4dab81beb')
     def test_aggregate_create_update_with_az(self):
-        # Update an aggregate and ensure properties are updated correctly
+        """Test create/update aggregate with availability_zone"""
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         az_name = data_utils.rand_name(self.az_name_prefix)
         aggregate = self._create_test_aggregate(
@@ -148,7 +149,7 @@
 
     @decorators.idempotent_id('c8e85064-e79b-4906-9931-c11c24294d02')
     def test_aggregate_add_remove_host(self):
-        # Add a host to the given aggregate and remove.
+        """Test adding host to and removing host from aggregate"""
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = self._create_test_aggregate(name=aggregate_name)
@@ -169,7 +170,10 @@
 
     @decorators.idempotent_id('7f6a1cc5-2446-4cdb-9baa-b6ae0a919b72')
     def test_aggregate_add_host_list(self):
-        # Add a host to the given aggregate and list.
+        """Test listing aggregate contains the host added to the aggregate
+
+        Add a host to the given aggregate and list.
+        """
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = self._create_test_aggregate(name=aggregate_name)
@@ -188,7 +192,10 @@
 
     @decorators.idempotent_id('eeef473c-7c52-494d-9f09-2ed7fc8fc036')
     def test_aggregate_add_host_get_details(self):
-        # Add a host to the given aggregate and get details.
+        """Test showing aggregate contains the host added to the aggregate
+
+        Add a host to the given aggregate and get details.
+        """
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         aggregate = self._create_test_aggregate(name=aggregate_name)
@@ -204,7 +211,7 @@
 
     @decorators.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
     def test_aggregate_add_host_create_server_with_az(self):
-        # Add a host to the given aggregate and create a server.
+        """Test adding a host to the given aggregate and creating a server"""
         self.useFixture(fixtures.LockFixture('availability_zone'))
         az_name = data_utils.rand_name(self.az_name_prefix)
         aggregate = self._create_test_aggregate(availability_zone=az_name)
@@ -233,6 +240,11 @@
 
 
 class AggregatesAdminTestV241(AggregatesAdminTestBase):
+    """Tests Aggregates API that require admin privileges
+
+    Tests Aggregates API that require admin privileges with compute
+    microversion greater than 2.40.
+    """
     min_microversion = '2.41'
 
     # NOTE(gmann): This test tests the Aggregate APIs response schema
@@ -241,6 +253,11 @@
 
     @decorators.idempotent_id('fdf24d9e-8afa-4700-b6aa-9c498351504f')
     def test_create_update_show_aggregate_add_remove_host(self):
+        """Test response schema of aggregates API
+
+        Test response schema of aggregates API (create/update/show/add host/
+        remove host) with compute microversion greater than 2.40.
+        """
         # Update and add a host to the given aggregate and get details.
         self.useFixture(fixtures.LockFixture('availability_zone'))
         # Checking create aggregate API response schema
diff --git a/tempest/api/compute/admin/test_aggregates_negative.py b/tempest/api/compute/admin/test_aggregates_negative.py
index a6e0efa..7b115ce 100644
--- a/tempest/api/compute/admin/test_aggregates_negative.py
+++ b/tempest/api/compute/admin/test_aggregates_negative.py
@@ -49,7 +49,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('86a1cb14-da37-4a70-b056-903fd56dfe29')
     def test_aggregate_create_as_user(self):
-        # Regular user is not allowed to create an aggregate.
+        """Regular user is not allowed to create an aggregate"""
         aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
         self.assertRaises(lib_exc.Forbidden,
                           self.aggregates_client.create_aggregate,
@@ -58,7 +58,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3b8a1929-3793-4e92-bcb4-dfa572ee6c1d')
     def test_aggregate_create_aggregate_name_length_less_than_1(self):
-        # the length of aggregate name should >= 1 and <=255
+        """The length of aggregate name should >=1"""
         self.assertRaises(lib_exc.BadRequest,
                           self.client.create_aggregate,
                           name='')
@@ -66,7 +66,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4c194563-543b-4e70-a719-557bbe947fac')
     def test_aggregate_create_aggregate_name_length_exceeds_255(self):
-        # the length of aggregate name should >= 1 and <=255
+        """The length of aggregate name should <=255"""
         aggregate_name = 'a' * 256
         self.assertRaises(lib_exc.BadRequest,
                           self.client.create_aggregate,
@@ -75,7 +75,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9c23a291-b0b1-487b-b464-132e061151b3')
     def test_aggregate_create_with_existent_aggregate_name(self):
-        # creating an aggregate with existent aggregate name is forbidden
+        """Creating an aggregate with existent aggregate name is forbidden"""
         aggregate = self._create_test_aggregate()
         self.assertRaises(lib_exc.Conflict,
                           self.client.create_aggregate,
@@ -84,7 +84,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('cd6de795-c15d-45f1-8d9e-813c6bb72a3d')
     def test_aggregate_delete_as_user(self):
-        # Regular user is not allowed to delete an aggregate.
+        """Regular user is not allowed to delete an aggregate"""
         aggregate = self._create_test_aggregate()
         self.assertRaises(lib_exc.Forbidden,
                           self.aggregates_client.delete_aggregate,
@@ -93,14 +93,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b7d475a6-5dcd-4ff4-b70a-cd9de66a6672')
     def test_aggregate_list_as_user(self):
-        # Regular user is not allowed to list aggregates.
+        """Regular user is not allowed to list aggregates"""
         self.assertRaises(lib_exc.Forbidden,
                           self.aggregates_client.list_aggregates)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('557cad12-34c9-4ff4-95f0-22f0dfbaf7dc')
     def test_aggregate_get_details_as_user(self):
-        # Regular user is not allowed to get aggregate details.
+        """Regular user is not allowed to get aggregate details"""
         aggregate = self._create_test_aggregate()
         self.assertRaises(lib_exc.Forbidden,
                           self.aggregates_client.show_aggregate,
@@ -109,21 +109,21 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c74f4bf1-4708-4ff2-95a0-f49eaca951bd')
     def test_aggregate_delete_with_invalid_id(self):
-        # Delete an aggregate with invalid id should raise exceptions.
+        """Delete an aggregate with invalid id should raise exceptions"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_aggregate, -1)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3c916244-2c46-49a4-9b55-b20bb0ae512c')
     def test_aggregate_get_details_with_invalid_id(self):
-        # Get aggregate details with invalid id should raise exceptions.
+        """Get aggregate details with invalid id should raise exceptions"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.show_aggregate, -1)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0ef07828-12b4-45ba-87cc-41425faf5711')
     def test_aggregate_add_non_exist_host(self):
-        # Adding a non-exist host to an aggregate should raise exceptions.
+        """Adding a non-exist host to an aggregate should fail"""
         while True:
             non_exist_host = data_utils.rand_name('nonexist_host')
             if non_exist_host not in self.hosts:
@@ -135,7 +135,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7324c334-bd13-4c93-8521-5877322c3d51')
     def test_aggregate_add_host_as_user(self):
-        # Regular user is not allowed to add a host to an aggregate.
+        """Regular user is not allowed to add a host to an aggregate"""
         aggregate = self._create_test_aggregate()
         self.assertRaises(lib_exc.Forbidden,
                           self.aggregates_client.add_host,
@@ -144,6 +144,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('19dd44e1-c435-4ee1-a402-88c4f90b5950')
     def test_aggregate_add_existent_host(self):
+        """Adding already existing host to aggregate should fail"""
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate = self._create_test_aggregate()
 
@@ -157,7 +158,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7a53af20-137a-4e44-a4ae-e19260e626d9')
     def test_aggregate_remove_host_as_user(self):
-        # Regular user is not allowed to remove a host from an aggregate.
+        """Regular user is not allowed to remove a host from an aggregate"""
         self.useFixture(fixtures.LockFixture('availability_zone'))
         aggregate = self._create_test_aggregate()
 
@@ -172,6 +173,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('95d6a6fa-8da9-4426-84d0-eec0329f2e4d')
     def test_aggregate_remove_nonexistent_host(self):
+        """Removing not existing host from aggregate should fail"""
         aggregate = self._create_test_aggregate()
 
         self.assertRaises(lib_exc.NotFound, self.client.remove_host,
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index bbd39b6..3eb0d9a 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -27,12 +27,12 @@
 
     @decorators.idempotent_id('d3431479-8a09-4f76-aa2d-26dc580cb27c')
     def test_get_availability_zone_list(self):
-        # List of availability zone
+        """Test listing availability zones"""
         availability_zone = self.client.list_availability_zones()
         self.assertNotEmpty(availability_zone['availabilityZoneInfo'])
 
     @decorators.idempotent_id('ef726c58-530f-44c2-968c-c7bed22d5b8c')
     def test_get_availability_zone_list_detail(self):
-        # List of availability zones and available services
+        """Test listing availability zones with detail"""
         availability_zone = self.client.list_availability_zones(detail=True)
         self.assertNotEmpty(availability_zone['availabilityZoneInfo'])
diff --git a/tempest/api/compute/admin/test_availability_zone_negative.py b/tempest/api/compute/admin/test_availability_zone_negative.py
index a58c22c..6e576e8 100644
--- a/tempest/api/compute/admin/test_availability_zone_negative.py
+++ b/tempest/api/compute/admin/test_availability_zone_negative.py
@@ -18,7 +18,7 @@
 
 
 class AZAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests Availability Zone API List"""
+    """Negative Tests of Availability Zone API List"""
 
     @classmethod
     def setup_clients(cls):
@@ -28,8 +28,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('bf34dca2-fdc3-4073-9c02-7648d9eae0d7')
     def test_get_availability_zone_list_detail_with_non_admin_user(self):
-        # List of availability zones and available services with
-        # non-administrator user
+        """Test listing availability zone with detail by non-admin user
+
+        List of availability zones and available services with
+        non-administrator user is not allowed.
+        """
+
         self.assertRaises(
             lib_exc.Forbidden,
             self.non_adm_client.list_availability_zones, detail=True)
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 711b441..ccdfbf3 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -27,6 +27,8 @@
 
 
 class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
+    """Test creating servers with specific flavor"""
+
     @classmethod
     def setup_credentials(cls):
         cls.prepare_instance_network()
@@ -40,8 +42,10 @@
     @decorators.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
     @testtools.skipUnless(CONF.validation.run_validation,
                           'Instance validation tests are disabled.')
+    @testtools.skipIf("aarch64" in CONF.scenario.img_file,
+                      "Aarch64 does not support ephemeral disk test")
     def test_verify_created_server_ephemeral_disk(self):
-        # Verify that the ephemeral disk is created when creating server
+        """Verify that the ephemeral disk is created when creating server"""
         flavor_base = self.flavors_client.show_flavor(
             self.flavor_ref)['flavor']
 
diff --git a/tempest/api/compute/admin/test_delete_server.py b/tempest/api/compute/admin/test_delete_server.py
index 58cac57..c625939 100644
--- a/tempest/api/compute/admin/test_delete_server.py
+++ b/tempest/api/compute/admin/test_delete_server.py
@@ -19,6 +19,8 @@
 
 
 class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
+    """Test deletion of servers"""
+
     # NOTE: Server creations of each test class should be under 10
     # for preventing "Quota exceeded for instances".
 
@@ -30,7 +32,7 @@
 
     @decorators.idempotent_id('99774678-e072-49d1-9d2a-49a59bc56063')
     def test_delete_server_while_in_error_state(self):
-        # Delete a server while it's VM state is error
+        """Delete a server while it's VM state is error"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.admin_client.reset_state(server['id'], state='error')
         # Verify server's state
@@ -43,7 +45,7 @@
 
     @decorators.idempotent_id('73177903-6737-4f27-a60c-379e8ae8cf48')
     def test_admin_delete_servers_of_others(self):
-        # Administrator can delete servers of others
+        """Administrator can delete servers of others"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.admin_client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.servers_client, server['id'])
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 66c2c2d..9de3da9 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -22,6 +22,7 @@
 
 
 class FixedIPsTestJson(base.BaseV2ComputeAdminTest):
+    """Test fixed ips API"""
 
     @classmethod
     def skip_checks(cls):
@@ -56,13 +57,16 @@
 
     @decorators.idempotent_id('16b7d848-2f7c-4709-85a3-2dfb4576cc52')
     def test_list_fixed_ip_details(self):
+        """Test getting fixed ip details"""
         fixed_ip = self.client.show_fixed_ip(self.ip)
         self.assertEqual(fixed_ip['fixed_ip']['address'], self.ip)
 
     @decorators.idempotent_id('5485077b-7e46-4cec-b402-91dc3173433b')
     def test_set_reserve(self):
+        """Test reserving fixed ip"""
         self.client.reserve_fixed_ip(self.ip, reserve="None")
 
     @decorators.idempotent_id('7476e322-b9ff-4710-bf82-49d51bac6e2e')
     def test_set_unreserve(self):
+        """Test unreserving fixed ip"""
         self.client.reserve_fixed_ip(self.ip, unreserve="None")
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index 7d41f46..1629faa 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -22,6 +22,7 @@
 
 
 class FixedIPsNegativeTestJson(base.BaseV2ComputeAdminTest):
+    """Negative tests of fixed ips API"""
 
     @classmethod
     def skip_checks(cls):
@@ -58,12 +59,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9f17f47d-daad-4adc-986e-12370c93e407')
     def test_list_fixed_ip_details_with_non_admin_user(self):
+        """Test listing fixed ip with detail by non-admin user is forbidden"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.show_fixed_ip, self.ip)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ce60042c-fa60-4836-8d43-1c8e3359dc47')
     def test_set_reserve_with_non_admin_user(self):
+        """Test reserving fixed ip by non-admin user is forbidden"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.reserve_fixed_ip,
                           self.ip, reserve="None")
@@ -71,6 +74,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f1f7a35b-0390-48c5-9803-5f27461439db')
     def test_set_unreserve_with_non_admin_user(self):
+        """Test unreserving fixed ip by non-admin user is forbidden"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.reserve_fixed_ip,
                           self.ip, unreserve="None")
@@ -78,6 +82,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f51cf464-7fc5-4352-bc3e-e75cfa2cb717')
     def test_set_reserve_with_invalid_ip(self):
+        """Test reserving invalid fixed ip should fail"""
         # NOTE(maurosr): since this exercises the same code snippet, we do it
         # only for reserve action
         # NOTE(eliqiao): in Juno, the exception is NotFound, but in master, we
@@ -90,6 +95,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fd26ef50-f135-4232-9d32-281aab3f9176')
     def test_fixed_ip_with_invalid_action(self):
+        """Test operating fixed ip with invalid action should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.client.reserve_fixed_ip,
                           self.ip, invalid_action="None")
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 1483c2e..d6b6b7e 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -46,6 +46,7 @@
 
     @decorators.idempotent_id('8b4330e1-12c4-4554-9390-e6639971f086')
     def test_create_flavor_with_int_id(self):
+        """Test creating flavor with id of type integer"""
         flavor_id = data_utils.rand_int_id(start=1000)
         new_flavor_id = self.create_flavor(ram=self.ram,
                                            vcpus=self.vcpus,
@@ -55,6 +56,7 @@
 
     @decorators.idempotent_id('94c9bb4e-2c2a-4f3c-bb1f-5f0daf918e6d')
     def test_create_flavor_with_uuid_id(self):
+        """Test creating flavor with id of type uuid"""
         flavor_id = data_utils.rand_uuid()
         new_flavor_id = self.create_flavor(ram=self.ram,
                                            vcpus=self.vcpus,
@@ -64,8 +66,11 @@
 
     @decorators.idempotent_id('f83fe669-6758-448a-a85e-32d351f36fe0')
     def test_create_flavor_with_none_id(self):
-        # If nova receives a request with None as flavor_id,
-        # nova generates flavor_id of uuid.
+        """Test creating flavor without id specified
+
+        If nova receives a request with None as flavor_id,
+        nova generates flavor_id of uuid.
+        """
         flavor_id = None
         new_flavor_id = self.create_flavor(ram=self.ram,
                                            vcpus=self.vcpus,
@@ -75,8 +80,10 @@
 
     @decorators.idempotent_id('8261d7b0-be58-43ec-a2e5-300573c3f6c5')
     def test_create_flavor_verify_entry_in_list_details(self):
-        # Create a flavor and ensure it's details are listed
-        # This operation requires the user to have 'admin' role
+        """Create a flavor and ensure its details are listed
+
+        This operation requires the user to have 'admin' role
+        """
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
 
         # Create the flavor
@@ -94,12 +101,16 @@
 
     @decorators.idempotent_id('63dc64e6-2e79-4fdf-868f-85500d308d66')
     def test_create_list_flavor_without_extra_data(self):
-        # Create a flavor and ensure it is listed
-        # This operation requires the user to have 'admin' role
+        """Create a flavor and ensure it is listed
 
+        This operation requires the user to have 'admin' role
+        """
         def verify_flavor_response_extension(flavor):
             # check some extensions for the flavor create/show/detail response
-            self.assertEqual(flavor['swap'], '')
+            if self.is_requested_microversion_compatible('2.74'):
+                self.assertEqual(flavor['swap'], '')
+            else:
+                self.assertEqual(flavor['swap'], 0)
             self.assertEqual(int(flavor['rxtx_factor']), 1)
             self.assertEqual(flavor['OS-FLV-EXT-DATA:ephemeral'], 0)
             self.assertEqual(flavor['os-flavor-access:is_public'], True)
@@ -134,10 +145,12 @@
 
     @decorators.idempotent_id('be6cc18c-7c5d-48c0-ac16-17eaf03c54eb')
     def test_list_non_public_flavor(self):
-        # Create a flavor with os-flavor-access:is_public false.
-        # The flavor should not be present in list_details as the
-        # tenant is not automatically added access list.
-        # This operation requires the user to have 'admin' role
+        """Create a flavor with os-flavor-access:is_public false.
+
+        The flavor should not be present in list_details as the
+        tenant is not automatically added to the access list.
+        This operation requires the user to have 'admin' role
+        """
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
 
         # Create the flavor
@@ -156,7 +169,7 @@
 
     @decorators.idempotent_id('bcc418ef-799b-47cc-baa1-ce01368b8987')
     def test_create_server_with_non_public_flavor(self):
-        # Create a flavor with os-flavor-access:is_public false
+        """Create a flavor with os-flavor-access:is_public false"""
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk,
                                     is_public="False")
@@ -169,8 +182,10 @@
 
     @decorators.idempotent_id('b345b196-bfbd-4231-8ac1-6d7fe15ff3a3')
     def test_list_public_flavor_with_other_user(self):
-        # Create a Flavor with public access.
-        # Try to List/Get flavor with another user
+        """Create a Flavor with public access.
+
+        Try to List/Get flavor with another user
+        """
         flavor_name = data_utils.rand_name(self.flavor_name_prefix)
 
         # Create the flavor
@@ -184,6 +199,7 @@
 
     @decorators.idempotent_id('fb9cbde6-3a0e-41f2-a983-bdb0a823c44e')
     def test_is_public_string_variations(self):
+        """Test creating public and non public flavors"""
         flavor_name_not_public = data_utils.rand_name(self.flavor_name_prefix)
         flavor_name_public = data_utils.rand_name(self.flavor_name_prefix)
 
@@ -215,6 +231,7 @@
 
     @decorators.idempotent_id('3b541a2e-2ac2-4b42-8b8d-ba6e22fcd4da')
     def test_create_flavor_using_string_ram(self):
+        """Test creating flavor with ram of type string"""
         new_flavor_id = data_utils.rand_int_id(start=1000)
 
         ram = "1024"
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index b8e2b42..87ab7c7 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -43,8 +43,12 @@
 
     @decorators.idempotent_id('ea2c2211-29fa-4db9-97c3-906d36fad3e0')
     def test_flavor_access_list_with_private_flavor(self):
-        # Test to make sure that list flavor access on a newly created
-        # private flavor will return an empty access list
+        """Test listing flavor access for a private flavor
+
+        Listing flavor access on a newly created private flavor will return
+        an empty access list.
+        """
+        # Create a new private flavor
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
 
@@ -54,7 +58,7 @@
 
     @decorators.idempotent_id('59e622f6-bdf6-45e3-8ba8-fedad905a6b4')
     def test_flavor_access_add_remove(self):
-        # Test to add and remove flavor access to a given tenant.
+        """Test add/remove flavor access to a given project"""
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
 
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index 45ca10a..ac09cb0 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -46,7 +46,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0621c53e-d45d-40e7-951d-43e5e257b272')
     def test_flavor_access_list_with_public_flavor(self):
-        # Test to list flavor access with exceptions by querying public flavor
+        """Test listing flavor access of a public flavor should fail"""
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='True')
         self.assertRaises(lib_exc.NotFound,
@@ -56,7 +56,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('41eaaade-6d37-4f28-9c74-f21b46ca67bd')
     def test_flavor_non_admin_add(self):
-        # Test to add flavor access as a user without admin privileges.
+        """Test adding flavor access by a non-admin user is forbidden"""
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
         self.assertRaises(lib_exc.Forbidden,
@@ -67,7 +67,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('073e79a6-c311-4525-82dc-6083d919cb3a')
     def test_flavor_non_admin_remove(self):
-        # Test to remove flavor access as a user without admin privileges.
+        """Test removing flavor access by a non-admin user should fail"""
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
 
@@ -84,6 +84,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f3592cc0-0306-483c-b210-9a7b5346eddc')
     def test_add_flavor_access_duplicate(self):
+        """Test adding duplicate flavor access to same flavor should fail"""
         # Create a new flavor.
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
@@ -104,6 +105,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1f710927-3bc7-4381-9f82-0ca6e42644b7')
     def test_remove_flavor_access_not_found(self):
+        """Test removing non existent flavor access should fail"""
         # Create a new flavor.
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 4d27a22..4c531b3 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -61,10 +61,13 @@
 
     @decorators.idempotent_id('0b2f9d4b-1ca2-4b99-bb40-165d4bb94208')
     def test_flavor_set_get_update_show_unset_keys(self):
-        # Test to SET, GET, UPDATE, SHOW, UNSET flavor extra
-        # spec as a user with admin privileges.
+        """Test flavor extra spec operations by admin user
+
+        Test to SET, GET, UPDATE, SHOW, UNSET flavor extra
+        spec as a user with admin privileges.
+        """
         # Assigning extra specs values that are to be set
-        specs = {"key1": "value1", "key2": "value2"}
+        specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
         # SET extra specs to the flavor created in setUp
         set_body = self.admin_flavors_client.set_flavor_extra_spec(
             self.flavor['id'], **specs)['extra_specs']
@@ -74,30 +77,34 @@
             self.flavor['id'])['extra_specs'])
         self.assertEqual(get_body, specs)
 
-        # UPDATE the value of the extra specs key1
-        update_body = \
-            self.admin_flavors_client.update_flavor_extra_spec(
-                self.flavor['id'], "key1", key1="value")
-        self.assertEqual({"key1": "value"}, update_body)
+        # UPDATE the value of the extra specs 'hw:numa_nodes'
+        update_body = self.admin_flavors_client.update_flavor_extra_spec(
+            self.flavor['id'], "hw:numa_nodes", **{'hw:numa_nodes': '2'})
+        self.assertEqual({'hw:numa_nodes': '2'}, update_body)
 
-        # GET extra specs and verify the value of the key2
+        # GET extra specs and verify the value of the 'hw:cpu_policy'
         # is the same as before
         get_body = self.admin_flavors_client.list_flavor_extra_specs(
             self.flavor['id'])['extra_specs']
-        self.assertEqual(get_body, {"key1": "value", "key2": "value2"})
+        self.assertEqual(
+            get_body, {'hw:numa_nodes': '2', 'hw:cpu_policy': 'shared'}
+        )
 
         # UNSET extra specs that were set in this test
-        self.admin_flavors_client.unset_flavor_extra_spec(self.flavor['id'],
-                                                          "key1")
-        self.admin_flavors_client.unset_flavor_extra_spec(self.flavor['id'],
-                                                          "key2")
+        self.admin_flavors_client.unset_flavor_extra_spec(
+            self.flavor['id'], 'hw:numa_nodes'
+        )
+        self.admin_flavors_client.unset_flavor_extra_spec(
+            self.flavor['id'], 'hw:cpu_policy'
+        )
         get_body = self.admin_flavors_client.list_flavor_extra_specs(
             self.flavor['id'])['extra_specs']
         self.assertEmpty(get_body)
 
     @decorators.idempotent_id('a99dad88-ae1c-4fba-aeb4-32f898218bd0')
     def test_flavor_non_admin_get_all_keys(self):
-        specs = {"key1": "value1", "key2": "value2"}
+        """Test non admin user getting all flavor extra spec keys"""
+        specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
         self.admin_flavors_client.set_flavor_extra_spec(self.flavor['id'],
                                                         **specs)
         body = (self.flavors_client.list_flavor_extra_specs(
@@ -108,11 +115,15 @@
 
     @decorators.idempotent_id('12805a7f-39a3-4042-b989-701d5cad9c90')
     def test_flavor_non_admin_get_specific_key(self):
+        """Test non admin user getting specific flavor extra spec key"""
+        specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
         body = self.admin_flavors_client.set_flavor_extra_spec(
-            self.flavor['id'], key1="value1", key2="value2")['extra_specs']
-        self.assertEqual(body['key1'], 'value1')
-        self.assertIn('key2', body)
+            self.flavor['id'], **specs
+        )['extra_specs']
+        self.assertEqual(body['hw:numa_nodes'], '1')
+        self.assertIn('hw:cpu_policy', body)
+
         body = self.flavors_client.show_flavor_extra_spec(
-            self.flavor['id'], 'key1')
-        self.assertEqual(body['key1'], 'value1')
-        self.assertNotIn('key2', body)
+            self.flavor['id'], 'hw:numa_nodes')
+        self.assertEqual(body['hw:numa_nodes'], '1')
+        self.assertNotIn('hw:cpu_policy', body)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index 5cde39e..721acca 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -64,70 +64,82 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a00a3b81-5641-45a8-ab2b-4a8ec41e1d7d')
     def test_flavor_non_admin_set_keys(self):
-        # Test to SET flavor extra spec as a user without admin privileges.
+        """Test to SET flavor extra spec as a user without admin privileges"""
         self.assertRaises(lib_exc.Forbidden,
                           self.flavors_client.set_flavor_extra_spec,
                           self.flavor['id'],
-                          key1="value1", key2="value2")
+                          **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'})
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1ebf4ef8-759e-48fe-a801-d451d80476fb')
     def test_flavor_non_admin_update_specific_key(self):
-        # non admin user is not allowed to update flavor extra spec
+        """non admin user is not allowed to update flavor extra spec"""
         body = self.admin_flavors_client.set_flavor_extra_spec(
-            self.flavor['id'], key1="value1", key2="value2")['extra_specs']
-        self.assertEqual(body['key1'], 'value1')
+            self.flavor['id'],
+            **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
+        )['extra_specs']
+        self.assertEqual(body['hw:numa_nodes'], '1')
         self.assertRaises(lib_exc.Forbidden,
                           self.flavors_client.
                           update_flavor_extra_spec,
                           self.flavor['id'],
-                          'key1',
-                          key1='value1_new')
+                          'hw:numa_nodes',
+                          **{'hw:numa_nodes': '1'})
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('28f12249-27c7-44c1-8810-1f382f316b11')
     def test_flavor_non_admin_unset_keys(self):
+        """non admin user is not allowed to unset flavor extra spec"""
         self.admin_flavors_client.set_flavor_extra_spec(
-            self.flavor['id'], key1="value1", key2="value2")
+            self.flavor['id'],
+            **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
+        )
 
         self.assertRaises(lib_exc.Forbidden,
                           self.flavors_client.unset_flavor_extra_spec,
                           self.flavor['id'],
-                          'key1')
+                          'hw:numa_nodes')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('440b9f3f-3c7f-4293-a106-0ceda350f8de')
     def test_flavor_unset_nonexistent_key(self):
+        """Unsetting non existence flavor extra spec key should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.admin_flavors_client.unset_flavor_extra_spec,
                           self.flavor['id'],
-                          'nonexistent_key')
+                          'hw:cpu_thread_policy')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('329a7be3-54b2-48be-8052-bf2ce4afd898')
     def test_flavor_get_nonexistent_key(self):
+        """Getting non existence flavor extra spec key should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.flavors_client.show_flavor_extra_spec,
                           self.flavor['id'],
-                          "nonexistent_key")
+                          'hw:cpu_thread_policy')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('25b822b8-9f49-44f6-80de-d99f0482e5cb')
     def test_flavor_update_mismatch_key(self):
-        # the key will be updated should be match the key in the body
+        """Updating unmatched flavor extra spec key should fail
+
+        The key to be updated should match the key in the body
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.admin_flavors_client.update_flavor_extra_spec,
                           self.flavor['id'],
-                          "key2",
-                          key1="value")
+                          'hw:numa_nodes',
+                          **{'hw:cpu_policy': 'shared'})
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f5889590-bf66-41cc-b4b1-6e6370cfd93f')
     def test_flavor_update_more_key(self):
-        # there should be just one item in the request body
+        """Updating multiple flavor spec keys should fail
+
+        There should be just one item in the request body
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.admin_flavors_client.update_flavor_extra_spec,
                           self.flavor['id'],
-                          "key1",
-                          key1="value",
-                          key2="value")
+                          'hw:numa_nodes',
+                          **{'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'})
diff --git a/tempest/api/compute/admin/test_flavors_microversions.py b/tempest/api/compute/admin/test_flavors_microversions.py
index 31b9217..d904cbd 100644
--- a/tempest/api/compute/admin/test_flavors_microversions.py
+++ b/tempest/api/compute/admin/test_flavors_microversions.py
@@ -18,6 +18,8 @@
 
 
 class FlavorsV255TestJSON(base.BaseV2ComputeAdminTest):
+    """Test flavors API with compute microversion greater than 2.54"""
+
     min_microversion = '2.55'
     max_microversion = 'latest'
 
@@ -26,6 +28,11 @@
 
     @decorators.idempotent_id('61976b25-488d-41dc-9dcb-cb9693a7b075')
     def test_crud_flavor(self):
+        """Test create/show/update/list flavor
+
+        Check the response schema of flavors API with microversion greater
+        than 2.54.
+        """
         flavor_id = data_utils.rand_int_id(start=1000)
         # Checking create API response schema
         new_flavor_id = self.create_flavor(ram=512,
@@ -44,6 +51,7 @@
 
 
 class FlavorsV261TestJSON(FlavorsV255TestJSON):
+    """Test flavors API with compute microversion greater than 2.60"""
     min_microversion = '2.61'
     max_microversion = 'latest'
 
diff --git a/tempest/api/compute/admin/test_floating_ips_bulk.py b/tempest/api/compute/admin/test_floating_ips_bulk.py
index 2d7e1a7..786c7f0 100644
--- a/tempest/api/compute/admin/test_floating_ips_bulk.py
+++ b/tempest/api/compute/admin/test_floating_ips_bulk.py
@@ -63,7 +63,7 @@
     @decorators.idempotent_id('2c8f145f-8012-4cb8-ac7e-95a587f0e4ab')
     @utils.services('network')
     def test_create_list_delete_floating_ips_bulk(self):
-        # Create, List  and delete the Floating IPs Bulk
+        """Creating, listing and deleting the Floating IPs Bulk"""
         pool = 'test_pool'
         # NOTE(GMann): Reserving the IP range but those are not attached
         # anywhere. Using the below mentioned interface which is not ever
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index c246685..30f3388 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -18,7 +18,7 @@
 
 
 class HostsAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests hosts API using admin privileges."""
+    """Tests nova hosts API using admin privileges."""
 
     max_microversion = '2.42'
 
@@ -29,11 +29,13 @@
 
     @decorators.idempotent_id('9bfaf98d-e2cb-44b0-a07e-2558b2821e4f')
     def test_list_hosts(self):
+        """Listing nova hosts"""
         hosts = self.client.list_hosts()['hosts']
         self.assertGreaterEqual(len(hosts), 2, str(hosts))
 
     @decorators.idempotent_id('5dc06f5b-d887-47a2-bb2a-67762ef3c6de')
     def test_list_hosts_with_zone(self):
+        """Listing nova hosts with specified availability zone"""
         self.useFixture(fixtures.LockFixture('availability_zone'))
         hosts = self.client.list_hosts()['hosts']
         host = hosts[0]
@@ -43,20 +45,27 @@
 
     @decorators.idempotent_id('9af3c171-fbf4-4150-a624-22109733c2a6')
     def test_list_hosts_with_a_blank_zone(self):
-        # If send the request with a blank zone, the request will be successful
-        # and it will return all the hosts list
+        """Listing nova hosts with blank availability zone
+
+        If send the request with a blank zone, the request will be successful
+        and it will return all the hosts list
+        """
         hosts = self.client.list_hosts(zone='')['hosts']
         self.assertNotEmpty(hosts)
 
     @decorators.idempotent_id('c6ddbadb-c94e-4500-b12f-8ffc43843ff8')
     def test_list_hosts_with_nonexistent_zone(self):
-        # If send the request with a nonexistent zone, the request will be
-        # successful and no hosts will be returned
+        """Listing nova hosts with not existing availability zone.
+
+        If send the request with a nonexistent zone, the request will be
+        successful and no hosts will be returned
+        """
         hosts = self.client.list_hosts(zone='xxx')['hosts']
         self.assertEmpty(hosts)
 
     @decorators.idempotent_id('38adbb12-aee2-4498-8aec-329c72423aa4')
     def test_show_host_detail(self):
+        """Showing nova host details"""
         hosts = self.client.list_hosts()['hosts']
 
         hosts = [host for host in hosts if host['service'] == 'compute']
diff --git a/tempest/api/compute/admin/test_hosts_negative.py b/tempest/api/compute/admin/test_hosts_negative.py
index 8a91ae2..e9436bc 100644
--- a/tempest/api/compute/admin/test_hosts_negative.py
+++ b/tempest/api/compute/admin/test_hosts_negative.py
@@ -39,18 +39,21 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('dd032027-0210-4d9c-860e-69b1b8deed5f')
     def test_list_hosts_with_non_admin_user(self):
+        """Non admin user is not allowed to list hosts"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.list_hosts)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e75b0a1a-041f-47a1-8b4a-b72a6ff36d3f')
     def test_show_host_detail_with_nonexistent_hostname(self):
+        """Showing host detail with not existing hostname should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.show_host, 'nonexistent_hostname')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('19ebe09c-bfd4-4b7c-81a2-e2e0710f59cc')
     def test_show_host_detail_with_non_admin_user(self):
+        """Non admin user is not allowed to show host details"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.show_host,
                           self.hostname)
@@ -58,6 +61,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e40c72b1-0239-4ed6-ba21-81a184df1f7c')
     def test_update_host_with_non_admin_user(self):
+        """Non admin user is not allowed to update host"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.update_host,
                           self.hostname,
@@ -67,7 +71,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fbe2bf3e-3246-4a95-a59f-94e4e298ec77')
     def test_update_host_with_invalid_status(self):
-        # 'status' can only be 'enable' or 'disable'
+        """Updating host to invalid status should fail
+
+        'status' can only be 'enable' or 'disable'.
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_host,
                           self.hostname,
@@ -77,7 +84,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ab1e230e-5e22-41a9-8699-82b9947915d4')
     def test_update_host_with_invalid_maintenance_mode(self):
-        # 'maintenance_mode' can only be 'enable' or 'disable'
+        """Updating host to invalid maintenance mode should fail
+
+        'maintenance_mode' can only be 'enable' or 'disable'.
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_host,
                           self.hostname,
@@ -87,7 +97,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0cd85f75-6992-4a4a-b1bd-d11e37fd0eee')
     def test_update_host_without_param(self):
-        # 'status' or 'maintenance_mode' needed for host update
+        """Updating host without param should fail
+
+        'status' or 'maintenance_mode' is needed for host update
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_host,
                           self.hostname)
@@ -95,6 +108,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('23c92146-2100-4d68-b2d6-c7ade970c9c1')
     def test_update_nonexistent_host(self):
+        """Updating not existing host should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.update_host,
                           'nonexistent_hostname',
@@ -104,6 +118,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0d981ac3-4320-4898-b674-82b61fbb60e4')
     def test_startup_nonexistent_host(self):
+        """Starting up not existing host should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.startup_host,
                           'nonexistent_hostname')
@@ -111,6 +126,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9f4ebb7e-b2ae-4e5b-a38f-0fd1bb0ddfca')
     def test_startup_host_with_non_admin_user(self):
+        """Non admin user is not allowed to startup host"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.startup_host,
                           self.hostname)
@@ -118,6 +134,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9e637444-29cf-4244-88c8-831ae82c31b6')
     def test_shutdown_nonexistent_host(self):
+        """Shutting down not existing host should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.shutdown_host,
                           'nonexistent_hostname')
@@ -125,6 +142,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a803529c-7e3f-4d3c-a7d6-8e1c203d27f6')
     def test_shutdown_host_with_non_admin_user(self):
+        """Non admin user is not allowed to shutdown host"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.shutdown_host,
                           self.hostname)
@@ -132,6 +150,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f86bfd7b-0b13-4849-ae29-0322e83ee58b')
     def test_reboot_nonexistent_host(self):
+        """Rebooting not existing host should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.reboot_host,
                           'nonexistent_hostname')
@@ -139,6 +158,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('02d79bb9-eb57-4612-abf6-2cb38897d2f8')
     def test_reboot_host_with_non_admin_user(self):
+        """Non admin user is not allowed to reboot host"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.reboot_host,
                           self.hostname)
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 9822c26..347193d 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -36,19 +36,19 @@
 
     @decorators.idempotent_id('7f0ceacd-c64d-4e96-b8ee-d02943142cc5')
     def test_get_hypervisor_list(self):
-        # List of hypervisor and available hypervisors hostname
+        """List of hypervisor and available hypervisors hostname"""
         hypers = self._list_hypervisors()
         self.assertNotEmpty(hypers, "No hypervisors found.")
 
     @decorators.idempotent_id('1e7fdac2-b672-4ad1-97a4-bad0e3030118')
     def test_get_hypervisor_list_details(self):
-        # Display the details of the all hypervisor
+        """Display the details of the all hypervisor"""
         hypers = self.client.list_hypervisors(detail=True)['hypervisors']
         self.assertNotEmpty(hypers, "No hypervisors found.")
 
     @decorators.idempotent_id('94ff9eae-a183-428e-9cdb-79fde71211cc')
     def test_get_hypervisor_show_details(self):
-        # Display the details of the specified hypervisor
+        """Display the details of the specified hypervisor"""
         hypers = self._list_hypervisors()
         self.assertNotEmpty(hypers, "No hypervisors found.")
 
@@ -59,14 +59,14 @@
 
     @decorators.idempotent_id('797e4f28-b6e0-454d-a548-80cc77c00816')
     def test_get_hypervisor_stats(self):
-        # Verify the stats of the all hypervisor
+        """Verify the stats of the all hypervisor"""
         stats = (self.client.show_hypervisor_statistics()
                  ['hypervisor_statistics'])
         self.assertNotEmpty(stats)
 
     @decorators.idempotent_id('91a50d7d-1c2b-4f24-b55a-a1fe20efca70')
     def test_get_hypervisor_uptime(self):
-        # Verify that GET shows the specified hypervisor uptime
+        """Verify that GET shows the specified hypervisor uptime"""
         hypers = self._list_hypervisors()
 
         # Ironic will register each baremetal node as a 'hypervisor',
@@ -106,10 +106,13 @@
 
 
 class HypervisorAdminV228Test(HypervisorAdminTestBase):
+    """Tests Hypervisors API higher than 2.27 that require admin privileges"""
+
     min_microversion = '2.28'
 
     @decorators.idempotent_id('d46bab64-0fbe-4eb8-9133-e6ee56188cc5')
     def test_get_list_hypervisor_details(self):
+        """Test listing and showing hypervisor details"""
         # NOTE(zhufl): This test tests the hypervisor APIs response schema
         # for 2.28 microversion. No specific assert or behaviour verification
         # is needed.
@@ -119,11 +122,13 @@
 
 
 class HypervisorAdminUnderV252Test(HypervisorAdminTestBase):
+    """Tests Hypervisors API below 2.53 that require admin privileges"""
+
     max_microversion = '2.52'
 
     @decorators.idempotent_id('e81bba3f-6215-4e39-a286-d52d2f906862')
     def test_get_hypervisor_show_servers(self):
-        # Show instances about the specific hypervisors
+        """Test showing instances about the specific hypervisors"""
         hypers = self._list_hypervisors()
         self.assertNotEmpty(hypers, "No hypervisors found.")
 
@@ -134,6 +139,7 @@
 
     @decorators.idempotent_id('d7e1805b-3b14-4a3b-b6fd-50ec6d9f361f')
     def test_search_hypervisor(self):
+        """Test searching for hypervisors by its name"""
         hypers = self._list_hypervisors()
         self.assertNotEmpty(hypers, "No hypervisors found.")
         hypers = self.client.search_hypervisor(
diff --git a/tempest/api/compute/admin/test_hypervisor_negative.py b/tempest/api/compute/admin/test_hypervisor_negative.py
index 0056376..9aaffd9 100644
--- a/tempest/api/compute/admin/test_hypervisor_negative.py
+++ b/tempest/api/compute/admin/test_hypervisor_negative.py
@@ -40,6 +40,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c136086a-0f67-4b2b-bc61-8482bd68989f')
     def test_show_nonexistent_hypervisor(self):
+        """Test showing non existent hypervisor should fail"""
         nonexistent_hypervisor_id = data_utils.rand_uuid()
 
         self.assertRaises(
@@ -50,6 +51,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('51e663d0-6b89-4817-a465-20aca0667d03')
     def test_show_hypervisor_with_non_admin_user(self):
+        """Test showing hypervisor by non admin user should fail"""
         hypers = self._list_hypervisors()
         self.assertNotEmpty(hypers)
 
@@ -61,6 +63,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e2b061bb-13f9-40d8-9d6e-d5bf17595849')
     def test_get_hypervisor_stats_with_non_admin_user(self):
+        """Test getting hypervisor stats by non admin user should fail"""
         self.assertRaises(
             lib_exc.Forbidden,
             self.non_adm_client.show_hypervisor_statistics)
@@ -68,6 +71,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f60aa680-9a3a-4c7d-90e1-fae3a4891303')
     def test_get_nonexistent_hypervisor_uptime(self):
+        """Test showing uptime of non existent hypervisor should fail"""
         nonexistent_hypervisor_id = data_utils.rand_uuid()
 
         self.assertRaises(
@@ -78,6 +82,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6c3461f9-c04c-4e2a-bebb-71dc9cb47df2')
     def test_get_hypervisor_uptime_with_non_admin_user(self):
+        """Test showing uptime of hypervisor by non admin user should fail"""
         hypers = self._list_hypervisors()
         self.assertNotEmpty(hypers)
 
@@ -89,7 +94,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('51b3d536-9b14-409c-9bce-c6f7c794994e')
     def test_get_hypervisor_list_with_non_admin_user(self):
-        # List of hypervisor and available services with non admin user
+        """Test listing hypervisors by non admin user should fail"""
         self.assertRaises(
             lib_exc.Forbidden,
             self.non_adm_client.list_hypervisors)
@@ -97,18 +102,21 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('dc02db05-e801-4c5f-bc8e-d915290ab345')
     def test_get_hypervisor_list_details_with_non_admin_user(self):
-        # List of hypervisor details and available services with non admin user
+        """Test listing hypervisor details by non admin user should fail"""
         self.assertRaises(
             lib_exc.Forbidden,
             self.non_adm_client.list_hypervisors, detail=True)
 
 
 class HypervisorAdminNegativeUnderV252Test(HypervisorAdminNegativeTestBase):
+    """Tests Hypervisors API below ver 2.53 that require admin privileges"""
+
     max_microversion = '2.52'
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('2a0a3938-832e-4859-95bf-1c57c236b924')
     def test_show_servers_with_non_admin_user(self):
+        """Test showing hypervisor servers by non admin user should fail"""
         hypers = self._list_hypervisors()
         self.assertNotEmpty(hypers)
 
@@ -120,6 +128,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('02463d69-0ace-4d33-a4a8-93d7883a2bba')
     def test_show_servers_with_nonexistent_hypervisor(self):
+        """Test showing servers on non existent hypervisor should fail"""
         nonexistent_hypervisor_id = data_utils.rand_uuid()
 
         self.assertRaises(
@@ -130,6 +139,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5b6a6c79-5dc1-4fa5-9c58-9c8085948e74')
     def test_search_hypervisor_with_non_admin_user(self):
+        """Test searching hypervisor by non admin user should fail"""
         hypers = self._list_hypervisors()
         self.assertNotEmpty(hypers)
 
@@ -141,6 +151,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('19a45cc1-1000-4055-b6d2-28e8b2ec4faa')
     def test_search_nonexistent_hypervisor(self):
+        """Test searching non existent hypervisor should fail"""
         self.assertRaises(
             lib_exc.NotFound,
             self.client.search_hypervisor,
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log.py b/tempest/api/compute/admin/test_instance_usage_audit_log.py
index 1b62249..c4c0542 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log.py
@@ -14,14 +14,14 @@
 #    under the License.
 
 import datetime
-
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
 from tempest.api.compute import base
 from tempest.lib import decorators
 
 
 class InstanceUsageAuditLogTestJSON(base.BaseV2ComputeAdminTest):
+    """Test instance usage audit logs API"""
 
     @classmethod
     def setup_clients(cls):
@@ -30,12 +30,12 @@
 
     @decorators.idempotent_id('25319919-33d9-424f-9f99-2c203ee48b9d')
     def test_list_instance_usage_audit_logs(self):
-        # list instance usage audit logs
+        """Test listing instance usage audit logs"""
         self.adm_client.list_instance_usage_audit_logs()
 
     @decorators.idempotent_id('6e40459d-7c5f-400b-9e83-449fbc8e7feb')
     def test_get_instance_usage_audit_log(self):
-        # Get instance usage audit log before specified time
+        """Test getting instance usage audit log before specified time"""
         now = datetime.datetime.now()
         self.adm_client.show_instance_usage_audit_log(
             urllib.quote(now.strftime("%Y-%m-%d %H:%M:%S")))
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py b/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
index de8e221..c115451 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
@@ -14,8 +14,7 @@
 #    under the License.
 
 import datetime
-
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
 from tempest.api.compute import base
 from tempest.lib import decorators
@@ -23,6 +22,7 @@
 
 
 class InstanceUsageAuditLogNegativeTestJSON(base.BaseV2ComputeAdminTest):
+    """Negative tests of instance usage audit logs"""
 
     @classmethod
     def setup_clients(cls):
@@ -32,7 +32,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a9d33178-d2c9-4131-ad3b-f4ca8d0308a2')
     def test_instance_usage_audit_logs_with_nonadmin_user(self):
-        # the instance_usage_audit_logs API just can be accessed by admin user
+        """Test list/show instance usage audit logs by non-admin should fail
+
+        The instance_usage_audit_logs API can only be accessed by admin users.
+        """
         self.assertRaises(lib_exc.Forbidden,
                           self.instance_usages_audit_log_client.
                           list_instance_usage_audit_logs)
@@ -45,6 +48,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9b952047-3641-41c7-ba91-a809fc5974c8')
     def test_get_instance_usage_audit_logs_with_invalid_time(self):
+        """Test showing instance usage audit logs with invalid time
+
+        Showing instance usage audit logs with invalid time should fail.
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.adm_client.show_instance_usage_audit_log,
                           "invalid_time")
diff --git a/tempest/api/compute/admin/test_keypairs_v210.py b/tempest/api/compute/admin/test_keypairs_v210.py
index 40ed532..3068127 100644
--- a/tempest/api/compute/admin/test_keypairs_v210.py
+++ b/tempest/api/compute/admin/test_keypairs_v210.py
@@ -19,6 +19,8 @@
 
 
 class KeyPairsV210TestJSON(base.BaseKeypairTest):
+    """Tests KeyPairs API with microversion higher than 2.9"""
+
     credentials = ['primary', 'admin']
     min_microversion = '2.10'
 
@@ -48,6 +50,13 @@
 
     @decorators.idempotent_id('3c8484af-cfb3-48f6-b8ba-d5d58bbf3eac')
     def test_admin_manage_keypairs_for_other_users(self):
+        """Test admin managing keypairs for other users
+
+        First the admin creates a keypair for another user, then the admin
+        lists keypairs filtered by that user; keypairs created for that user
+        should appear in the result and keypairs not created for that user
+        should not.
+        """
         user_id = self.non_admin_client.user_id
         key_list = self._create_and_check_keypairs(user_id)
         first_keyname = key_list[0]['name']
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 836b975..c91b557 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -23,6 +23,8 @@
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
 CONF = config.CONF
@@ -30,6 +32,7 @@
 
 
 class LiveMigrationTestBase(base.BaseV2ComputeAdminTest):
+    """Test live migration operations supported by admin user"""
 
     # These tests don't attempt any SSH validation nor do they use
     # floating IPs on the instance, so all we need is a network and
@@ -54,6 +57,10 @@
     def setup_clients(cls):
         super(LiveMigrationTestBase, cls).setup_clients()
         cls.admin_migration_client = cls.os_admin.migrations_client
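+        # Primary-credential network clients used by the trunk
+        # live-migration test below.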
+        cls.networks_client = cls.os_primary.networks_client
+        cls.subnets_client = cls.os_primary.subnets_client
+        cls.ports_client = cls.os_primary.ports_client
+        cls.trunks_client = cls.os_primary.trunks_client
 
     def _migrate_server_to(self, server_id, dest_host, volume_backed=False):
         kwargs = dict()
@@ -70,6 +77,10 @@
 
     def _live_migrate(self, server_id, target_host, state,
                       volume_backed=False):
+        # If target_host is None, check whether the source host differs from
+        # the new host after the migration.
+        if target_host is None:
+            source_host = self.get_host_for_server(server_id)
         self._migrate_server_to(server_id, target_host, volume_backed)
         waiters.wait_for_server_status(self.servers_client, server_id, state)
         migration_list = (self.admin_migration_client.list_migrations()
@@ -81,8 +92,12 @@
             if (live_migration['instance_uuid'] == server_id):
                 msg += "\n%s" % live_migration
         msg += "]"
-        self.assertEqual(target_host, self.get_host_for_server(server_id),
-                         msg)
+        if target_host is None:
+            self.assertNotEqual(source_host,
+                                self.get_host_for_server(server_id), msg)
+        else:
+            self.assertEqual(target_host, self.get_host_for_server(server_id),
+                             msg)
 
 
 class LiveMigrationTest(LiveMigrationTestBase):
@@ -104,7 +119,11 @@
         server_id = self.create_test_server(wait_until="ACTIVE",
                                             volume_backed=volume_backed)['id']
         source_host = self.get_host_for_server(server_id)
-        destination_host = self.get_host_other_than(server_id)
+        if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
+            # Do not specify a host so that the scheduler will pick one.
+            destination_host = None
+        else:
+            destination_host = self.get_host_other_than(server_id)
 
         if state == 'PAUSED':
             self.admin_servers_client.pause_server(server_id)
@@ -122,13 +141,21 @@
             self._live_migrate(server_id, source_host, state, volume_backed)
 
     @decorators.idempotent_id('1dce86b8-eb04-4c03-a9d8-9c1dc3ee0c7b')
+    @testtools.skipUnless(CONF.compute_feature_enabled.
+                          block_migration_for_live_migration,
+                          'Block Live migration not available')
     def test_live_block_migration(self):
+        """Test live migrating an active server"""
         self._test_live_migration()
 
     @decorators.idempotent_id('1e107f21-61b2-4988-8f22-b196e938ab88')
+    @testtools.skipUnless(CONF.compute_feature_enabled.
+                          block_migration_for_live_migration,
+                          'Block Live migration not available')
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_live_block_migration_paused(self):
+        """Test live migrating a paused server"""
         self._test_live_migration(state='PAUSED')
 
     @testtools.skipUnless(CONF.compute_feature_enabled.
@@ -137,6 +164,7 @@
     @decorators.idempotent_id('5071cf17-3004-4257-ae61-73a84e28badd')
     @utils.services('volume')
     def test_volume_backed_live_migration(self):
+        """Test live migrating an active server booted from volume"""
         self._test_live_migration(volume_backed=True)
 
     @decorators.idempotent_id('e19c0cc6-6720-4ed8-be83-b6603ed5c812')
@@ -147,10 +175,20 @@
                       block_migrate_cinder_iscsi,
                       'Block Live migration not configured for iSCSI')
     @utils.services('volume')
-    def test_iscsi_volume(self):
+    def test_live_block_migration_with_attached_volume(self):
+        """Test the live-migration of an instance with an attached volume.
+
+        This tests the live-migration of an instance with both a local disk
+        and an attached volume. This differs from
+        test_volume_backed_live_migration above, which tests live-migration
+        with only an attached volume.
+        """
         server = self.create_test_server(wait_until="ACTIVE")
         server_id = server['id']
-        target_host = self.get_host_other_than(server_id)
+        if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
+            # Do not specify a host so that the scheduler will pick one.
+            target_host = None
+        else:
+            target_host = self.get_host_other_than(server_id)
 
         volume = self.create_volume()
 
@@ -165,6 +203,90 @@
 
         self.assertEqual(volume_id1, volume_id2)
 
+    def _create_net_subnet(self, name, cidr):
+        net_name = data_utils.rand_name(name=name)
+        net = self.networks_client.create_network(name=net_name)['network']
+        self.addClassResourceCleanup(
+            self.networks_client.delete_network, net['id'])
+
+        subnet = self.subnets_client.create_subnet(
+            network_id=net['id'],
+            cidr=cidr,
+            ip_version=4)
+        self.addClassResourceCleanup(self.subnets_client.delete_subnet,
+                                     subnet['subnet']['id'])
+        return net
+
+    def _create_port(self, network_id, name):
+        name = data_utils.rand_name(name=name)
+        port = self.ports_client.create_port(name=name,
+                                             network_id=network_id)['port']
+        self.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+                                     self.ports_client.delete_port,
+                                     port_id=port['id'])
+        return port
+
+    def _create_trunk_with_subport(self):
+        tenant_network = self.get_tenant_network()
+        parent = self._create_port(network_id=tenant_network['id'],
+                                   name='parent')
+        net = self._create_net_subnet(name='subport_net', cidr='19.80.0.0/24')
+        subport = self._create_port(network_id=net['id'], name='subport')
+
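+        # Attach the subport to the parent port as a VLAN subport; the
+        # segmentation ID used here (42) is a fixed example value.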
+        trunk = self.trunks_client.create_trunk(
+            name=data_utils.rand_name('trunk'),
+            port_id=parent['id'],
+            sub_ports=[{"segmentation_id": 42, "port_id": subport['id'],
+                        "segmentation_type": "vlan"}]
+        )['trunk']
+        self.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+                                     self.trunks_client.delete_trunk,
+                                     trunk['id'])
+        return trunk, parent, subport
+
+    def _is_port_status_active(self, port_id):
+        port = self.ports_client.show_port(port_id)['port']
+        return port['status'] == 'ACTIVE'
+
+    @decorators.idempotent_id('0022c12e-a482-42b0-be2d-396b5f0cffe3')
+    @utils.requires_ext(service='network', extension='trunk')
+    @utils.services('network')
+    def test_live_migration_with_trunk(self):
+        """Test live migration with trunk and subport"""
+        trunk, parent, subport = self._create_trunk_with_subport()
+
+        server = self.create_test_server(
+            wait_until="ACTIVE", networks=[{'port': parent['id']}])
+
+        # Wait till both the subport and the parent port are ACTIVE
+        self.assertTrue(
+            test_utils.call_until_true(
+                self._is_port_status_active, CONF.validation.connect_timeout,
+                5, subport['id']))
+        self.assertTrue(
+            test_utils.call_until_true(
+                self._is_port_status_active, CONF.validation.connect_timeout,
+                5, parent['id']))
+        subport = self.ports_client.show_port(subport['id'])['port']
+
+        if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
+            # Do not specify a host so that the scheduler will pick one.
+            target_host = None
+        else:
+            target_host = self.get_host_other_than(server['id'])
+
+        self._live_migrate(server['id'], target_host, 'ACTIVE')
+
+        # Wait till both the subport and the parent port are ACTIVE
+        self.assertTrue(
+            test_utils.call_until_true(
+                self._is_port_status_active, CONF.validation.connect_timeout,
+                5, subport['id']))
+        self.assertTrue(
+            test_utils.call_until_true(
+                self._is_port_status_active, CONF.validation.connect_timeout,
+                5, parent['id']))
+
 
 class LiveMigrationRemoteConsolesV26Test(LiveMigrationTestBase):
     min_microversion = '2.6'
diff --git a/tempest/api/compute/admin/test_live_migration_negative.py b/tempest/api/compute/admin/test_live_migration_negative.py
index 8327a3b..80c0525 100644
--- a/tempest/api/compute/admin/test_live_migration_negative.py
+++ b/tempest/api/compute/admin/test_live_migration_negative.py
@@ -24,6 +24,8 @@
 
 
 class LiveMigrationNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of live migration"""
+
     @classmethod
     def skip_checks(cls):
         super(LiveMigrationNegativeTest, cls).skip_checks()
@@ -40,7 +42,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7fb7856e-ae92-44c9-861a-af62d7830bcb')
     def test_invalid_host_for_migration(self):
-        # Migrating to an invalid host should not change the status
+        """Test migrating to an invalid host should not change the status"""
         target_host = data_utils.rand_name('host')
         server = self.create_test_server(wait_until="ACTIVE")
 
@@ -52,6 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6e2f94f5-2ee8-4830-bef5-5bc95bb0795b')
     def test_live_block_migration_suspended(self):
+        """Test migrating a suspended server should not change the status"""
         server = self.create_test_server(wait_until="ACTIVE")
 
         self.admin_servers_client.suspend_server(server['id'])
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 83f2e61..89152d6 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -25,6 +25,7 @@
 
 
 class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
+    """Test migration operations supported by admin user"""
 
     @classmethod
     def setup_clients(cls):
@@ -33,14 +34,14 @@
 
     @decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
     def test_list_migrations(self):
-        # Admin can get the migrations list
+        """Test admin user can get the migrations list"""
         self.client.list_migrations()
 
     @decorators.idempotent_id('1b512062-8093-438e-b47a-37d2f597cd64')
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_list_migrations_in_flavor_resize_situation(self):
-        # Admin can get the migrations list which contains the resized server
+        """Admin can get the migrations list containing the resized server"""
         server = self.create_test_server(wait_until="ACTIVE")
         server_id = server['id']
 
@@ -62,8 +63,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_revert_deleted_flavor(self):
-        # Tests that we can revert the resize on an instance whose original
-        # flavor has been deleted.
+        """Test reverting resized server with original flavor deleted
+
+        Tests that we can revert the resize on an instance whose original
+        flavor has been deleted.
+        """
 
         # First we have to create a flavor that we can delete so make a copy
         # of the normal flavor from which we'd create a server.
@@ -90,6 +94,16 @@
         # Now boot a server with the copied flavor.
         server = self.create_test_server(
             wait_until='ACTIVE', flavor=flavor['id'])
+        server = self.servers_client.show_server(server['id'])['server']
+
+        # If 'id' is not in server['flavor'], we can only compare the flavor
+        # details, so save the to-be-deleted flavor's details here for the
+        # flavor comparison after the server resize.
+        if not server['flavor'].get('id'):
+            pre_flavor = {}
+            body = self.flavors_client.show_flavor(flavor['id'])['flavor']
+            for key in ['name', 'ram', 'vcpus', 'disk']:
+                pre_flavor[key] = body[key]
 
         # Delete the flavor we used to boot the instance.
         self._flavor_clean_up(flavor['id'])
@@ -106,7 +120,18 @@
                                        'ACTIVE')
 
         server = self.servers_client.show_server(server['id'])['server']
-        self.assert_flavor_equal(flavor['id'], server['flavor'])
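+        # Newer compute microversions (>= 2.47) embed the full flavor details
+        # in the server response instead of an id, so compare whichever form
+        # is present.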
+        if server['flavor'].get('id'):
+            msg = ('server flavor is not the same as flavor!')
+            self.assertEqual(flavor['id'], server['flavor']['id'], msg)
+        else:
+            self.assertEqual(pre_flavor['name'],
+                             server['flavor']['original_name'],
+                             "original_name in server flavor is not the "
+                             "same as flavor name!")
+            for key in ['ram', 'vcpus', 'disk']:
+                msg = ('attribute %s in server flavor is not the same as '
+                       'flavor!' % key)
+                self.assertEqual(pre_flavor[key], server['flavor'][key], msg)
 
     def _test_cold_migrate_server(self, revert=False):
         if CONF.compute.min_compute_nodes < 2:
@@ -137,10 +162,12 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
                           'Cold migration not available.')
     def test_cold_migration(self):
+        """Test cold migrating server and then confirm the migration"""
         self._test_cold_migrate_server(revert=False)
 
     @decorators.idempotent_id('caa1aa8b-f4ef-4374-be0d-95f001c2ac2d')
     @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
                           'Cold migration not available.')
     def test_revert_cold_migration(self):
+        """Test cold migrating server and then revert the migration"""
         self._test_cold_migrate_server(revert=True)
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index 33b23b5..fb6376e 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -35,6 +35,7 @@
 
     @decorators.idempotent_id('d206d211-8912-486f-86e2-a9d090d1f416')
     def test_get_network(self):
+        """Test getting network from nova side"""
         networks = self.client.list_networks()['networks']
         if CONF.compute.fixed_network_name:
             configured_network = [x for x in networks if x['label'] ==
@@ -56,6 +57,7 @@
 
     @decorators.idempotent_id('df3d1046-6fa5-4b2c-ad0c-cfa46a351cb9')
     def test_list_all_networks(self):
+        """Test getting all networks from nova side"""
         networks = self.client.list_networks()['networks']
         # Check the configured network is in the list
         if CONF.compute.fixed_network_name:
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 0060ffe..9d5e0c9 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -97,9 +97,11 @@
 
 
 class QuotasAdminTestJSON(QuotasAdminTestBase):
+    """Test compute quotas by admin user"""
+
     @decorators.idempotent_id('3b0a7c8f-cf58-46b8-a60c-715a32a8ba7d')
     def test_get_default_quotas(self):
-        # Admin can get the default resource quota set for a tenant
+        """Test admin can get the default compute quota set for a project"""
         expected_quota_set = self.default_quota_set | set(['id'])
         quota_set = self.adm_client.show_default_quota_set(
             self.demo_tenant_id)['quota_set']
@@ -109,7 +111,7 @@
 
     @decorators.idempotent_id('55fbe2bf-21a9-435b-bbd2-4162b0ed799a')
     def test_update_all_quota_resources_for_tenant(self):
-        # Admin can update all the resource quota limits for a tenant
+        """Test admin can update all the compute quota limits for a project"""
         default_quota_set = self.adm_client.show_default_quota_set(
             self.demo_tenant_id)['quota_set']
         new_quota_set = {'metadata_items': 256, 'ram': 10240,
@@ -140,11 +142,12 @@
     # TODO(afazekas): merge these test cases
     @decorators.idempotent_id('ce9e0815-8091-4abd-8345-7fe5b85faa1d')
     def test_get_updated_quotas(self):
+        """Test that GET shows the updated quota set of project"""
         self._get_updated_quotas()
 
     @decorators.idempotent_id('389d04f0-3a41-405f-9317-e5f86e3c44f0')
     def test_delete_quota(self):
-        # Admin can delete the resource quota set for a project
+        """Test admin can delete the compute quota set for a project"""
         project_name = data_utils.rand_name('ram_quota_project')
         project_desc = project_name + '-desc'
         project = identity.identity_utils(self.os_admin).create_project(
@@ -165,26 +168,40 @@
 
 
 class QuotasAdminTestV236(QuotasAdminTestBase):
-    min_microversion = '2.36'
+    """Test compute quotas with microversion greater than 2.35
+
     # NOTE(gmann): This test tests the Quota APIs response schema
     # for 2.36 microversion. No specific assert or behaviour verification
     # is needed.
+    """
+
+    min_microversion = '2.36'
 
     @decorators.idempotent_id('4268b5c9-92e5-4adc-acf1-3a2798f3d803')
     def test_get_updated_quotas(self):
-        # Checking Quota update, get, get details APIs response schema
+        """Test compute quotas API with microversion greater than 2.35
+
+        Checking compute quota update, get, get details APIs response schema.
+        """
         self._get_updated_quotas()
 
 
 class QuotasAdminTestV257(QuotasAdminTestBase):
-    min_microversion = '2.57'
+    """Test compute quotas with microversion greater than 2.56
+
     # NOTE(gmann): This test tests the Quota APIs response schema
     # for 2.57 microversion. No specific assert or behaviour verification
     # is needed.
+    """
+
+    min_microversion = '2.57'
 
     @decorators.idempotent_id('e641e6c6-e86c-41a4-9e5c-9493c0ae47ad')
     def test_get_updated_quotas(self):
-        # Checking Quota update, get, get details APIs response schema
+        """Test compute quotas API with microversion greater than 2.56
+
+        Checking compute quota update, get, get details APIs response schema.
+        """
         self._get_updated_quotas()
 
 
@@ -212,6 +229,7 @@
     # 'danger' flag.
     @decorators.idempotent_id('7932ab0f-5136-4075-b201-c0e2338df51a')
     def test_update_default_quotas(self):
+        """Test updating default compute quota class set"""
         # get the current 'default' quota class values
         body = (self.adm_client.show_quota_class_set('default')
                 ['quota_class_set'])
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index f90ff92..04dbc2d 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -53,10 +53,12 @@
 
 
 class QuotasAdminNegativeTest(QuotasAdminNegativeTestBase):
+    """Negative tests of nova quotas"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('733abfe8-166e-47bb-8363-23dbd7ff3476')
     def test_update_quota_normal_user(self):
+        """Test updating nova quota by normal user should fail"""
         self.assertRaises(lib_exc.Forbidden,
                           self.client.update_quota_set,
                           self.demo_tenant_id,
@@ -67,7 +69,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('91058876-9947-4807-9f22-f6eb17140d9b')
     def test_create_server_when_cpu_quota_is_full(self):
-        # Disallow server creation when tenant's vcpu quota is full
+        """Disallow server creation when tenant's vcpu quota is full"""
         self._update_quota('cores', 0)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                           self.create_test_server)
@@ -75,7 +77,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6fdd7012-584d-4327-a61c-49122e0d5864')
     def test_create_server_when_memory_quota_is_full(self):
-        # Disallow server creation when tenant's memory quota is full
+        """Disallow server creation when tenant's memory quota is full"""
         self._update_quota('ram', 0)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                           self.create_test_server)
@@ -83,13 +85,15 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7c6be468-0274-449a-81c3-ac1c32ee0161')
     def test_create_server_when_instances_quota_is_full(self):
-        # Once instances quota limit is reached, disallow server creation
+        """Once instances quota limit is reached, disallow server creation"""
         self._update_quota('instances', 0)
         self.assertRaises((lib_exc.Forbidden, lib_exc.OverLimit),
                           self.create_test_server)
 
 
 class QuotasSecurityGroupAdminNegativeTest(QuotasAdminNegativeTestBase):
+    """Negative tests of nova security group quota"""
+
     max_microversion = '2.35'
 
     @decorators.skip_because(bug="1186354",
@@ -98,7 +102,7 @@
     @decorators.idempotent_id('7c6c8f3b-2bf6-4918-b240-57b136a66aa0')
     @utils.services('network')
     def test_security_groups_exceed_limit(self):
-        # Negative test: Creation Security Groups over limit should FAIL
+        """Negative test: Creation Security Groups over limit should FAIL"""
         # Set the quota to number of used security groups
         sg_quota = self.limits_client.show_limits()['limits']['absolute'][
             'totalSecurityGroupsUsed']
@@ -117,7 +121,7 @@
     @decorators.idempotent_id('6e9f436d-f1ed-4f8e-a493-7275dfaa4b4d')
     @utils.services('network')
     def test_security_groups_rules_exceed_limit(self):
-        # Negative test: Creation of Security Group Rules should FAIL
+        """Negative test: Creation of Security Group Rules should FAIL"""
         # when we reach limit maxSecurityGroupRules
         self._update_quota('security_group_rules', 0)
 
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index dfa801b..f0a6a84 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -20,6 +20,12 @@
 
 
 class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
+    """Test security groups API that requires admin privilege
+
+    Test security groups API that requires admin privilege with compute
+    microversion less than 2.36
+    """
+
     max_microversion = '2.35'
 
     @classmethod
@@ -37,7 +43,17 @@
     @decorators.idempotent_id('49667619-5af9-4c63-ab5d-2cfdd1c8f7f1')
     @utils.services('network')
     def test_list_security_groups_list_all_tenants_filter(self):
-        # Admin can list security groups of all tenants
+        """Test listing security groups with all_tenants filter
+
+        1. Create two security groups for non-admin user
+        2. Create two security groups for admin user
+        3. Fetch all security groups based on the 'all_tenants' search filter
+           by admin, check that all four created security groups are present
+           in the fetched list
+        4. Fetch all security groups based on the 'all_tenants' search filter
+           by non-admin, check that only the two security groups created by
+           that non-admin user are present in the fetched list
+        """
         # List of all security groups created
         security_group_list = []
         # Create two security groups for a non-admin tenant
diff --git a/tempest/api/compute/admin/test_server_diagnostics.py b/tempest/api/compute/admin/test_server_diagnostics.py
index 005efdd..d855a62 100644
--- a/tempest/api/compute/admin/test_server_diagnostics.py
+++ b/tempest/api/compute/admin/test_server_diagnostics.py
@@ -19,6 +19,8 @@
 
 
 class ServerDiagnosticsTest(base.BaseV2ComputeAdminTest):
+    """Test server diagnostics with compute microversion less than 2.48"""
+
     min_microversion = None
     max_microversion = '2.47'
 
@@ -29,6 +31,7 @@
 
     @decorators.idempotent_id('31ff3486-b8a0-4f56-a6c0-aab460531db3')
     def test_get_server_diagnostics(self):
+        """Test getting server diagnostics"""
         server_id = self.create_test_server(wait_until='ACTIVE')['id']
         diagnostics = self.client.show_server_diagnostics(server_id)
 
@@ -41,6 +44,8 @@
 
 
 class ServerDiagnosticsV248Test(base.BaseV2ComputeAdminTest):
+    """Test server diagnostics with compute microversion greater than 2.47"""
+
     min_microversion = '2.48'
     max_microversion = 'latest'
 
@@ -51,6 +56,7 @@
 
     @decorators.idempotent_id('64d0d48c-dff1-11e6-bf01-fe55135034f3')
     def test_get_server_diagnostics(self):
+        """Test getting server diagnostics"""
         server_id = self.create_test_server(wait_until='ACTIVE')['id']
         # Response status and filed types will be checked by json schema
         self.client.show_server_diagnostics(server_id)
diff --git a/tempest/api/compute/admin/test_server_diagnostics_negative.py b/tempest/api/compute/admin/test_server_diagnostics_negative.py
index 6215c37..8f14cbc 100644
--- a/tempest/api/compute/admin/test_server_diagnostics_negative.py
+++ b/tempest/api/compute/admin/test_server_diagnostics_negative.py
@@ -18,6 +18,7 @@
 
 
 class ServerDiagnosticsNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of server diagnostics"""
 
     @classmethod
     def setup_clients(cls):
@@ -27,7 +28,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e84e2234-60d2-42fa-8b30-e2d3049724ac')
     def test_get_server_diagnostics_by_non_admin(self):
-        # Non-admin user cannot view server diagnostics according to policy
+        """Test getting server diagnostics by non-admin user is forbidden
+
+        Non-admin user cannot view server diagnostics according to policy.
+        """
         server_id = self.create_test_server(wait_until='ACTIVE')['id']
         self.assertRaises(lib_exc.Forbidden,
                           self.client.show_server_diagnostics, server_id)
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 170b2cc..ab1b49a 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -14,10 +14,13 @@
 
 from tempest.api.compute import base
 from tempest.common import waiters
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
+CONF = config.CONF
+
 
 class ServersAdminTestJSON(base.BaseV2ComputeAdminTest):
     """Tests Servers API using admin privileges"""
@@ -45,7 +48,7 @@
 
     @decorators.idempotent_id('06f960bb-15bb-48dc-873d-f96e89be7870')
     def test_list_servers_filter_by_error_status(self):
-        # Filter the list of servers by server error status
+        """Test filtering the list of servers by server error status"""
         params = {'status': 'error'}
         self.client.reset_state(self.s1_id, state='error')
         body = self.non_admin_client.list_servers(**params)
@@ -61,6 +64,7 @@
 
     @decorators.idempotent_id('d56e9540-73ed-45e0-9b88-98fc419087eb')
     def test_list_servers_detailed_filter_by_invalid_status(self):
+        """Test filtering the list of servers by invalid server status"""
         params = {'status': 'invalid_status'}
         if self.is_requested_microversion_compatible('2.37'):
             body = self.client.list_servers(detail=True, **params)
@@ -72,8 +76,11 @@
 
     @decorators.idempotent_id('51717b38-bdc1-458b-b636-1cf82d99f62f')
     def test_list_servers_by_admin(self):
-        # Listing servers by admin user returns a list which doesn't
-        # contain the other tenants' server by default
+        """Test listing servers by admin without other projects
+
+        Listing servers by admin user returns a list which doesn't
+        contain the other projects' servers by default.
+        """
         body = self.client.list_servers(detail=True)
         servers = body['servers']
 
@@ -85,8 +92,11 @@
 
     @decorators.idempotent_id('9f5579ae-19b4-4985-a091-2a5d56106580')
     def test_list_servers_by_admin_with_all_tenants(self):
-        # Listing servers by admin user with all tenants parameter
-        # Here should be listed all servers
+        """Test listing servers by admin with all tenants
+
+        Listing servers by admin user with all tenants parameter,
+        all servers should be listed.
+        """
         params = {'all_tenants': ''}
         body = self.client.list_servers(detail=True, **params)
         servers = body['servers']
@@ -98,8 +108,10 @@
     @decorators.related_bug('1659811')
     @decorators.idempotent_id('7e5d6b8f-454a-4ba1-8ae2-da857af8338b')
     def test_list_servers_by_admin_with_specified_tenant(self):
-        # In nova v2, tenant_id is ignored unless all_tenants is specified
+        """Test listing servers by admin with specified project
 
+        In nova v2, tenant_id is ignored unless all_tenants is specified.
+        """
         # List the primary tenant but get nothing due to odd specified behavior
         tenant_id = self.non_admin_client.tenant_id
         params = {'tenant_id': tenant_id}
@@ -128,7 +140,7 @@
 
     @decorators.idempotent_id('86c7a8f7-50cf-43a9-9bac-5b985317134f')
     def test_list_servers_filter_by_exist_host(self):
-        # Filter the list of servers by existent host
+        """Test filtering the list of servers by existent host"""
         server = self.client.show_server(self.s1_id)['server']
         hostname = server['OS-EXT-SRV-ATTR:host']
         params = {'host': hostname, 'all_tenants': '1'}
@@ -144,6 +156,7 @@
 
     @decorators.idempotent_id('ee8ae470-db70-474d-b752-690b7892cab1')
     def test_reset_state_server(self):
+        """Test resetting server state to error/active"""
         # Reset server's state to 'error'
         self.client.reset_state(self.s1_id, state='error')
 
@@ -160,9 +173,11 @@
 
     @decorators.idempotent_id('682cb127-e5bb-4f53-87ce-cb9003604442')
     def test_rebuild_server_in_error_state(self):
-        # The server in error state should be rebuilt using the provided
-        # image and changed to ACTIVE state
+        """Test rebuilding server in error state
 
+        The server in error state should be rebuilt using the provided
+        image and changed to ACTIVE state.
+        """
         # resetting vm state require admin privilege
         self.client.reset_state(self.s1_id, state='error')
         rebuilt_server = self.non_admin_client.rebuild_server(
@@ -188,6 +203,11 @@
 
     @decorators.idempotent_id('7a1323b4-a6a2-497a-96cb-76c07b945c71')
     def test_reset_network_inject_network_info(self):
+        """Test resetting and injecting network info of a server"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'The resetNetwork server action is not supported.')
+
         # Reset Network of a Server
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.reset_network(server['id'])
@@ -196,6 +216,7 @@
 
     @decorators.idempotent_id('fdcd9b33-0903-4e00-a1f7-b5f6543068d6')
     def test_create_server_with_scheduling_hint(self):
+        """Test creating server with scheduling hint"""
         # Create a server with scheduler hints.
         hints = {
             'same_host': self.s1_id
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index f720b84..f52d4c0 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -26,7 +26,7 @@
 
 
 class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests Servers API using admin privileges"""
+    """Negative Tests of Servers API using admin privileges"""
 
     @classmethod
     def setup_clients(cls):
@@ -47,6 +47,7 @@
                           'Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_using_overlimit_ram(self):
+        """Test resizing server using over limit ram should fail"""
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
         self.useFixture(fixtures.LockFixture('compute_quotas'))
         quota_set = self.quotas_client.show_quota_set(
@@ -69,6 +70,7 @@
                           'Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_using_overlimit_vcpus(self):
+        """Test resizing server using over limit vcpus should fail"""
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
         self.useFixture(fixtures.LockFixture('compute_quotas'))
         quota_set = self.quotas_client.show_quota_set(
@@ -89,6 +91,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b0b4d8af-1256-41ef-9ee7-25f1c19dde80')
     def test_reset_state_server_invalid_state(self):
+        """Test resetting server state to invalid state value should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.client.reset_state, self.s1_id,
                           state='invalid')
@@ -96,6 +99,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4cdcc984-fab0-4577-9a9d-6d558527ee9d')
     def test_reset_state_server_invalid_type(self):
+        """Test resetting server state to invalid state type should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.client.reset_state, self.s1_id,
                           state=1)
@@ -103,13 +107,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e741298b-8df2-46f0-81cb-8f814ff2504c')
     def test_reset_state_server_nonexistent_server(self):
+        """Test resetting a non existent server's state should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.reset_state, '999', state='error')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('46a4e1ca-87ae-4d28-987a-1b6b136a0221')
     def test_migrate_non_existent_server(self):
-        # migrate a non existent server
+        """Test migrating a non existent server should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.migrate_server,
                           data_utils.rand_uuid())
@@ -121,6 +126,7 @@
                           'Suspend is not available.')
     @decorators.attr(type=['negative'])
     def test_migrate_server_invalid_state(self):
+        """Test migrating a server with invalid state should fail"""
         # create server.
         server = self.create_test_server(wait_until='ACTIVE')
         server_id = server['id']
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index bebc8c5..f440428 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -23,7 +23,7 @@
 
 
 class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
-
+    """Test creating servers on mutiple nodes with scheduler_hints."""
     @classmethod
     def resource_setup(cls):
         super(ServersOnMultiNodesTest, cls).resource_setup()
@@ -65,6 +65,7 @@
         compute.is_scheduler_filter_enabled("SameHostFilter"),
         'SameHostFilter is not available.')
     def test_create_servers_on_same_host(self):
+        """Test creating servers with hints 'same_host'"""
         hints = {'same_host': self.server01}
         server02 = self.create_test_server(scheduler_hints=hints,
                                            wait_until='ACTIVE')['id']
@@ -76,6 +77,7 @@
         compute.is_scheduler_filter_enabled("DifferentHostFilter"),
         'DifferentHostFilter is not available.')
     def test_create_servers_on_different_hosts(self):
+        """Test creating servers with hints of single 'different_host'"""
         hints = {'different_host': self.server01}
         server02 = self.create_test_server(scheduler_hints=hints,
                                            wait_until='ACTIVE')['id']
@@ -87,7 +89,7 @@
         compute.is_scheduler_filter_enabled("DifferentHostFilter"),
         'DifferentHostFilter is not available.')
     def test_create_servers_on_different_hosts_with_list_of_servers(self):
-        # This scheduler-hint supports list of servers also.
+        """Test creating servers with hints of a list of 'different_host'"""
         hints = {'different_host': [self.server01]}
         server02 = self.create_test_server(scheduler_hints=hints,
                                            wait_until='ACTIVE')['id']
diff --git a/tempest/api/compute/admin/test_services.py b/tempest/api/compute/admin/test_services.py
index 73e191b..24518a8 100644
--- a/tempest/api/compute/admin/test_services.py
+++ b/tempest/api/compute/admin/test_services.py
@@ -19,7 +19,10 @@
 
 
 class ServicesAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests Services API. List and Enable/Disable require admin privileges."""
+    """Tests Nova Services API.
+
+    List and Enable/Disable require admin privileges.
+    """
 
     @classmethod
     def setup_clients(cls):
@@ -28,11 +31,13 @@
 
     @decorators.idempotent_id('5be41ef4-53d1-41cc-8839-5c2a48a1b283')
     def test_list_services(self):
+        """Listing nova services"""
         services = self.client.list_services()['services']
         self.assertNotEmpty(services)
 
     @decorators.idempotent_id('f345b1ec-bc6e-4c38-a527-3ca2bc00bef5')
     def test_get_service_by_service_binary_name(self):
+        """Listing nova services by binary name"""
         binary_name = 'nova-compute'
         services = self.client.list_services(binary=binary_name)['services']
         self.assertNotEmpty(services)
@@ -41,6 +46,7 @@
 
     @decorators.idempotent_id('affb42d5-5b4b-43c8-8b0b-6dca054abcca')
     def test_get_service_by_host_name(self):
+        """Listing nova services by host name"""
         services = self.client.list_services()['services']
         host_name = services[0]['host']
         services_on_host = [service for service in services if
diff --git a/tempest/api/compute/admin/test_services_negative.py b/tempest/api/compute/admin/test_services_negative.py
index d264829..a4d7d3f 100644
--- a/tempest/api/compute/admin/test_services_negative.py
+++ b/tempest/api/compute/admin/test_services_negative.py
@@ -31,14 +31,21 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1126d1f8-266e-485f-a687-adc547492646')
     def test_list_services_with_non_admin_user(self):
+        """Non admin user is not allowed to list nova services"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_client.list_services)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d0884a69-f693-4e79-a9af-232d15643bf7')
     def test_get_service_by_invalid_params(self):
-        # Expect all services to be returned when the request contains invalid
-        # parameters.
+        """Test listing services by invalid filter should return all services
+
+        Expect all services to be returned when the request contains invalid
+        parameters.
+        """
+        if not self.is_requested_microversion_compatible('2.74'):
+            raise self.skipException(
+                "From microversion 2.75 invalid parameters are not allowed.")
         services = self.client.list_services()['services']
         services_xxx = (self.client.list_services(xxx='nova-compute')
                         ['services'])
@@ -47,6 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1e966d4a-226e-47c7-b601-0b18a27add54')
     def test_get_service_by_invalid_service_and_valid_host(self):
+        """Test listing services by invalid service and valid host value"""
         services = self.client.list_services()['services']
         host_name = services[0]['host']
         services = self.client.list_services(host=host_name,
@@ -56,6 +64,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('64e7e7fb-69e8-4cb6-a71d-8d5eb0c98655')
     def test_get_service_with_valid_service_and_invalid_host(self):
+        """Test listing services by valid service and invalid host value"""
         services = self.client.list_services()['services']
         binary_name = services[0]['binary']
         services = self.client.list_services(host='xxx',
@@ -79,6 +88,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('508671aa-c929-4479-bd10-8680d40dd0a6')
     def test_enable_service_with_invalid_service_id(self):
+        """Test updating non existing service to status enabled"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.update_service,
                           service_id=self.fake_service_id,
@@ -87,6 +97,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a9eeeade-42b3-419f-87aa-c9342aa068cf')
     def test_disable_service_with_invalid_service_id(self):
+        """Test updating non existing service to status disabled"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.update_service,
                           service_id=self.fake_service_id,
@@ -95,6 +106,8 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f46a9d91-1e85-4b96-8e7a-db7706fa2e9a')
     def test_disable_log_reason_with_invalid_service_id(self):
+        """Test updating non existing service to disabled with reason"""
+
         # disabled_reason requires that status='disabled' be provided.
         self.assertRaises(lib_exc.NotFound,
                           self.client.update_service,
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage.py b/tempest/api/compute/admin/test_simple_tenant_usage.py
index d4c60b3..c24f420 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage.py
@@ -26,6 +26,7 @@
 
 
 class TenantUsagesTestJSON(base.BaseV2ComputeAdminTest):
+    """Test tenant usages"""
 
     @classmethod
     def setup_clients(cls):
@@ -67,7 +68,7 @@
 
     @decorators.idempotent_id('062c8ae9-9912-4249-8b51-e38d664e926e')
     def test_list_usage_all_tenants(self):
-        # Get usage for all tenants
+        """Test getting usage for all tenants"""
         tenant_usage = self.call_until_valid(
             self.adm_client.list_tenant_usages, VALID_WAIT,
             start=self.start, end=self.end, detailed="1")['tenant_usages'][0]
@@ -75,7 +76,7 @@
 
     @decorators.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
     def test_get_usage_tenant(self):
-        # Get usage for a specific tenant
+        """Test getting usage for a specific tenant"""
         tenant_usage = self.call_until_valid(
             self.adm_client.show_tenant_usage, VALID_WAIT,
             self.tenant_id, start=self.start, end=self.end)['tenant_usage']
@@ -84,7 +85,7 @@
 
     @decorators.idempotent_id('9d00a412-b40e-4fd9-8eba-97b496316116')
     def test_get_usage_tenant_with_non_admin_user(self):
-        # Get usage for a specific tenant with non admin user
+        """Test getting usage for a specific tenant with non admin user"""
         tenant_usage = self.call_until_valid(
             self.client.show_tenant_usage, VALID_WAIT,
             self.tenant_id, start=self.start, end=self.end)['tenant_usage']
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
index cb60b8d..4b5a5d5 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
@@ -21,6 +21,7 @@
 
 
 class TenantUsagesNegativeTestJSON(base.BaseV2ComputeAdminTest):
+    """Negative tests of compute tenant usages API"""
 
     @classmethod
     def setup_clients(cls):
@@ -43,7 +44,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8b21e135-d94b-4991-b6e9-87059609c8ed')
     def test_get_usage_tenant_with_empty_tenant_id(self):
-        # Get usage for a specific tenant empty
+        """Test getting tenant usage with empty tenant id should fail"""
         params = {'start': self.start,
                   'end': self.end}
         self.assertRaises(lib_exc.NotFound,
@@ -53,7 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4079dd2a-9e8d-479f-869d-6fa985ce45b6')
     def test_get_usage_tenant_with_invalid_date(self):
-        # Get usage for tenant with invalid date
+        """Test getting tenant usage with invalid time range should fail"""
         params = {'start': self.end,
                   'end': self.start}
         self.assertRaises(lib_exc.BadRequest,
@@ -63,7 +64,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('bbe6fe2c-15d8-404c-a0a2-44fad0ad5cc7')
     def test_list_usage_all_tenants_with_non_admin_user(self):
-        # Get usage for all tenants with non admin user
+        """Test listing usage of all tenants by non-admin user is forbidden"""
         params = {'start': self.start,
                   'end': self.end,
                   'detailed': "1"}
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
new file mode 100644
index 0000000..cf8c560
--- /dev/null
+++ b/tempest/api/compute/admin/test_volume.py
@@ -0,0 +1,116 @@
+# Copyright 2020 Red Hat Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import io
+
+from tempest.api.compute import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class BaseAttachSCSIVolumeTest(base.BaseV2ComputeAdminTest):
+    """Base class for the admin volume tests in this module."""
+    create_default_network = True
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseAttachSCSIVolumeTest, cls).skip_checks()
+        if not CONF.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(BaseAttachSCSIVolumeTest, cls).setup_credentials()
+
+    def _create_image_with_custom_property(self, **kwargs):
+        """Wrapper utility that returns the custom image.
+
+        Creates a new image by downloading the default image's bits and
+        uploading them to a new image. Any kwargs are set as image properties
+        on the new image.
+
+        :return: The UUID of the newly created image.
+        """
+        image = self.image_client.show_image(CONF.compute.image_ref)
+        image_data = self.image_client.show_image_file(
+            CONF.compute.image_ref).data
+        image_file = io.BytesIO(image_data)
+        create_dict = {
+            'container_format': image['container_format'],
+            'disk_format': image['disk_format'],
+            'min_disk': image['min_disk'],
+            'min_ram': image['min_ram'],
+            'visibility': 'public',
+        }
+        create_dict.update(kwargs)
+        new_image = self.image_client.create_image(**create_dict)
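+        # Cleanups run in LIFO order: the image is deleted first and the
+        # wait_for_resource_deletion cleanup then waits for it to be gone.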
+        self.addCleanup(self.image_client.wait_for_resource_deletion,
+                        new_image['id'])
+        self.addCleanup(self.image_client.delete_image, new_image['id'])
+        self.image_client.store_image_file(new_image['id'], image_file)
+
+        return new_image['id']
+
+
+class AttachSCSIVolumeTestJSON(BaseAttachSCSIVolumeTest):
+    """Test attaching scsi volume to server"""
+
+    @decorators.idempotent_id('777e468f-17ca-4da4-b93d-b7dbf56c0494')
+    def test_attach_scsi_disk_with_config_drive(self):
+        """Test the attach/detach volume with config drive/scsi disk
+
+        Enable the config drive, followed by booting an instance
+        from an image with meta properties hw_cdrom: scsi and use
+        virtio-scsi mode with further asserting list volume attachments
+        in instance after attach and detach of the volume.
+        """
+        custom_img = self._create_image_with_custom_property(
+            hw_scsi_model='virtio-scsi',
+            hw_disk_bus='scsi',
+            hw_cdrom_bus='scsi')
+        server = self.create_test_server(image_id=custom_img,
+                                         config_drive=True,
+                                         wait_until='ACTIVE')
+
+        # NOTE(lyarwood): self.create_test_server deletes the server during
+        # class-level cleanup, so add a server cleanup here to ensure the
+        # instance is deleted before the created image. This avoids failures
+        # when the rbd backend is used for both Glance and Nova ephemeral
+        # storage. Also wait until the server is deleted, otherwise image
+        # deletion can start before the server is deleted.
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
+        self.addCleanup(self.servers_client.delete_server, server['id'])
+
+        volume = self.create_volume()
+        attachment = self.attach_volume(server, volume)
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, attachment['volumeId'], 'in-use')
+        volume_after_attach = self.servers_client.list_volume_attachments(
+            server['id'])['volumeAttachments']
+        self.assertEqual(1, len(volume_after_attach),
+                         "Failed to attach volume")
+        self.servers_client.detach_volume(
+            server['id'], attachment['volumeId'])
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, attachment['volumeId'], 'available')
+        waiters.wait_for_volume_attachment_remove_from_server(
+            self.servers_client, server['id'], attachment['volumeId'])
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 371b506..c1236a7 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -23,6 +23,7 @@
 
 
 class TestVolumeSwapBase(base.BaseV2ComputeAdminTest):
+    create_default_network = True
 
     @classmethod
     def skip_checks(cls):
@@ -67,21 +68,7 @@
 
 
 class TestVolumeSwap(TestVolumeSwapBase):
-    """The test suite for swapping of volume with admin user.
-
-    The following is the scenario outline:
-
-    1. Create a volume "volume1" with non-admin.
-    2. Create a volume "volume2" with non-admin.
-    3. Boot an instance "instance1" with non-admin.
-    4. Attach "volume1" to "instance1" with non-admin.
-    5. Swap volume from "volume1" to "volume2" as admin.
-    6. Check the swap volume is successful and "volume2"
-       is attached to "instance1" and "volume1" is in available state.
-    7. Swap volume from "volume2" to "volume1" as admin.
-    8. Check the swap volume is successful and "volume1"
-       is attached to "instance1" and "volume2" is in available state.
-    """
+    """The test suite for swapping of volume with admin user"""
 
     # NOTE(mriedem): This is an uncommon scenario to call the compute API
     # to swap volumes directly; swap volume is primarily only for volume
@@ -91,6 +78,21 @@
     @decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
     @utils.services('volume')
     def test_volume_swap(self):
+        """Test swapping of volume attached to server with admin user
+
+        The following is the scenario outline:
+
+        1. Create a volume "volume1" with non-admin.
+        2. Create a volume "volume2" with non-admin.
+        3. Boot an instance "instance1" with non-admin.
+        4. Attach "volume1" to "instance1" with non-admin.
+        5. Swap volume from "volume1" to "volume2" as admin.
+        6. Check the swap volume is successful and "volume2"
+           is attached to "instance1" and "volume1" is in available state.
+        7. Swap volume from "volume2" to "volume1" as admin.
+        8. Check the swap volume is successful and "volume1"
+           is attached to "instance1" and "volume2" is in available state.
+        """
         # Create two volumes.
         # NOTE(gmann): Volumes are created before server creation so that
         # volumes cleanup can happen successfully irrespective of which volume
@@ -133,6 +135,12 @@
 
 
 class TestMultiAttachVolumeSwap(TestVolumeSwapBase):
+    """Test swapping volume attached to multiple servers
+
+    Test swapping volume attached to multiple servers with microversion
+    greater than 2.59
+    """
+
     min_microversion = '2.60'
     max_microversion = 'latest'
 
@@ -163,6 +171,20 @@
                              condition=CONF.compute.min_compute_nodes > 1)
     @utils.services('volume')
     def test_volume_swap_with_multiattach(self):
+        """Test swapping volume attached to multiple servers
+
+        The following is the scenario outline:
+
+        1. Create a volume "volume1" with non-admin.
+        2. Create a volume "volume2" with non-admin.
+        3. Boot 2 instances "server1" and "server2" with non-admin.
+        4. Attach "volume1" to "server1" with non-admin.
+        5. Attach "volume1" to "server2" with non-admin.
+        6. Swap "volume1" to "volume2" on "server1"
+        7. Check "volume1" is attached to "server2" and not attached to
+           "server1"
+        8. Check "volume2" is attached to "server1".
+        """
         # Create two volumes.
         # NOTE(gmann): Volumes are created before server creation so that
         # volumes cleanup can happen successfully irrespective of which volume
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 4a7f36f..10d522b 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest.common import utils
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
@@ -22,6 +23,9 @@
 
 
 class VolumesAdminNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of volume swapping"""
+
+    create_default_network = True
 
     @classmethod
     def skip_checks(cls):
@@ -38,6 +42,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
     def test_update_attached_volume_with_nonexistent_volume_in_uri(self):
+        """Test swapping non existent volume should fail"""
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
@@ -49,6 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
     def test_update_attached_volume_with_nonexistent_volume_in_body(self):
+        """Test swapping volume to a non existence volume should fail"""
         volume = self.create_volume()
         self.attach_volume(self.server, volume)
 
@@ -57,3 +63,81 @@
                           self.admin_servers_client.update_attached_volume,
                           self.server['id'], volume['id'],
                           volumeId=nonexistent_volume)
+
+
+class UpdateMultiattachVolumeNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of swapping volume attached to multiple servers
+
+    Negative tests of swapping volume attached to multiple servers with
+    compute microversion greater than 2.59 and volume microversion greater
+    than 3.26
+    """
+
+    min_microversion = '2.60'
+    volume_min_microversion = '3.27'
+
+    @classmethod
+    def skip_checks(cls):
+        super(UpdateMultiattachVolumeNegativeTest, cls).skip_checks()
+        if not CONF.compute_feature_enabled.volume_multiattach:
+            raise cls.skipException('Volume multi-attach is not available.')
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('7576d497-b7c6-44bd-9cc5-c5b4e50fec71')
+    @utils.services('volume')
+    def test_multiattach_rw_volume_update_failure(self):
+        """Test swapping volume attached to multi-servers with read-write mode
+
+        1. Create two volumes "vol1" and "vol2"
+        2. Create two instances "server1" and "server2"
+        3. Attach "vol1" to both of these instances
+        4. By default both of these attachments should have an attach_mode of
+           read-write, so trying to swap "vol1" to "vol2" should fail
+        5. Check "vol1" is still attached to both servers
+        6. Check "vol2" is not attached to any server
+        """
+        # Create two multiattach capable volumes.
+        vol1 = self.create_volume(multiattach=True)
+        vol2 = self.create_volume(multiattach=True)
+
+        # Create two instances.
+        server1 = self.create_test_server(wait_until='ACTIVE')
+        server2 = self.create_test_server(wait_until='ACTIVE')
+
+        # Attach vol1 to both of these instances.
+        vol1_attachment1 = self.attach_volume(server1, vol1)
+        vol1_attachment2 = self.attach_volume(server2, vol1)
+
+        # Assert that we now have two attachments.
+        vol1 = self.volumes_client.show_volume(vol1['id'])['volume']
+        self.assertEqual(2, len(vol1['attachments']))
+
+        # By default both of these attachments should have an attach_mode of
+        # read-write, assert that here to ensure the following calls to update
+        # the volume will be rejected.
+        for volume_attachment in vol1['attachments']:
+            attachment_id = volume_attachment['attachment_id']
+            attachment = self.attachments_client.show_attachment(
+                attachment_id)['attachment']
+            self.assertEqual('rw', attachment['attach_mode'])
+
+        # Assert that a BadRequest is raised when we attempt to update volume1
+        # to volume2 on server1 or server2.
+        self.assertRaises(lib_exc.BadRequest,
+                          self.admin_servers_client.update_attached_volume,
+                          server1['id'], vol1['id'], volumeId=vol2['id'])
+        self.assertRaises(lib_exc.BadRequest,
+                          self.admin_servers_client.update_attached_volume,
+                          server2['id'], vol1['id'], volumeId=vol2['id'])
+
+        # Fetch the volume 1 to check the current attachments.
+        vol1 = self.volumes_client.show_volume(vol1['id'])['volume']
+        vol1_attachment_ids = [a['id'] for a in vol1['attachments']]
+
+        # Assert that volume 1 is still attached to both server 1 and 2.
+        self.assertIn(vol1_attachment1['id'], vol1_attachment_ids)
+        self.assertIn(vol1_attachment2['id'], vol1_attachment_ids)
+
+        # Assert that volume 2 has no attachments.
+        vol2 = self.volumes_client.show_volume(vol2['id'])['volume']
+        self.assertEqual([], vol2['attachments'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 8d0962d..922a14c 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -108,6 +108,8 @@
         cls.versions_client = cls.os_primary.compute_versions_client
         if CONF.service_available.cinder:
             cls.volumes_client = cls.os_primary.volumes_client_latest
+            cls.attachments_client = cls.os_primary.attachments_client_latest
+            cls.snapshots_client = cls.os_primary.snapshots_client_latest
         if CONF.service_available.glance:
             if CONF.image_feature_enabled.api_v1:
                 cls.images_client = cls.os_primary.image_client
@@ -169,8 +171,11 @@
         cls.flavor_ref = CONF.compute.flavor_ref
         cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
         cls.ssh_user = CONF.validation.image_ssh_user
+        cls.ssh_alt_user = CONF.validation.image_alt_ssh_user
         cls.image_ssh_user = CONF.validation.image_ssh_user
+        cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user
         cls.image_ssh_password = CONF.validation.image_ssh_password
+        cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password
 
     @classmethod
     def is_requested_microversion_compatible(cls, max_version):
@@ -226,7 +231,7 @@
 
     @classmethod
     def create_test_server(cls, validatable=False, volume_backed=False,
-                           validation_resources=None, **kwargs):
+                           validation_resources=None, clients=None, **kwargs):
         """Wrapper utility that returns a test server.
 
         This wrapper utility calls the common create test server and
@@ -238,6 +243,7 @@
         :param volume_backed: Whether the instance is volume backed or not.
         :param validation_resources: Dictionary of validation resources as
             returned by `get_class_validation_resources`.
+        :param clients: Client manager, defaults to os_primary.
         :param kwargs: Extra arguments are passed down to the
             `compute.create_test_server` call.
         """
@@ -254,8 +260,11 @@
             not tenant_network):
             kwargs['networks'] = 'none'
 
+        if clients is None:
+            clients = cls.os_primary
+
         body, servers = compute.create_test_server(
-            cls.os_primary,
+            clients,
             validatable,
             validation_resources=validation_resources,
             tenant_network=tenant_network,
@@ -266,11 +275,11 @@
         # and then wait for all
         for server in servers:
             cls.addClassResourceCleanup(waiters.wait_for_server_termination,
-                                        cls.servers_client, server['id'])
+                                        clients.servers_client, server['id'])
         for server in servers:
             cls.addClassResourceCleanup(
                 test_utils.call_and_ignore_notfound_exc,
-                cls.servers_client.delete_server, server['id'])
+                clients.servers_client.delete_server, server['id'])
 
         return body
 
@@ -445,6 +454,12 @@
         server = self.servers_client.show_server(server_id)['server']
         self.assert_flavor_equal(new_flavor_id, server['flavor'])
 
+    def reboot_server(self, server_id, type):
+        """Reboot a server and wait for it to be ACTIVE."""
+        self.servers_client.reboot_server(server_id, type=type)
+        waiters.wait_for_server_status(
+            self.servers_client, server_id, 'ACTIVE')
+
     @classmethod
     def delete_volume(cls, volume_id):
         """Deletes the given volume and waits for it to be gone."""
@@ -506,6 +521,12 @@
             kwargs['display_name'] = vol_name
         if image_ref is not None:
             kwargs['imageRef'] = image_ref
+        if CONF.volume.volume_type and 'volume_type' not in kwargs:
+            # Only set a volume_type when one is configured; if the caller
+            # (e.g. a child class) has already supplied a volume_type in
+            # kwargs we must not override it, and if nothing is configured
+            # there is nothing to add.
+            kwargs['volume_type'] = CONF.volume.volume_type
         if CONF.compute.compute_volume_common_az:
             kwargs.setdefault('availability_zone',
                               CONF.compute.compute_volume_common_az)
@@ -555,24 +576,54 @@
 
         attachment = self.servers_client.attach_volume(
             server['id'], **attach_kwargs)['volumeAttachment']
-        # On teardown detach the volume and for multiattach volumes wait for
-        # the attachment to be removed. For non-multiattach volumes wait for
-        # the state of the volume to change to available. This is so we don't
-        # error out when trying to delete the volume during teardown.
-        if volume['multiattach']:
-            self.addCleanup(waiters.wait_for_volume_attachment_remove,
-                            self.volumes_client, volume['id'],
-                            attachment['id'])
-        else:
-            self.addCleanup(waiters.wait_for_volume_resource_status,
-                            self.volumes_client, volume['id'], 'available')
-        # Ignore 404s on detach in case the server is deleted or the volume
-        # is already detached.
+
+        # NOTE(lyarwood): During attach we initially wait for the volume
+        # attachment and then check the volume state.
+        waiters.wait_for_volume_attachment_create(
+            self.volumes_client, volume['id'], server['id'])
+        # TODO(lyarwood): Remove the following volume status checks and move to
+        # attachment status checks across all volumes now with the 3.27
+        # microversion somehow.
+        if not volume['multiattach']:
+            waiters.wait_for_volume_resource_status(
+                self.volumes_client, volume['id'], 'in-use')
+
+        # NOTE(lyarwood): On teardown (LIFO) initially wait for the volume
+        # attachment in Nova to be removed. While this technically happens last
+        # we want this to be the first waiter as if it fails we can then dump
+        # the contents of the console log. The final check of the volume state
+        # should be a no-op by this point and is just added for completeness
+        # when detaching non-multiattach volumes.
+        if not volume['multiattach']:
+            self.addCleanup(
+                waiters.wait_for_volume_resource_status, self.volumes_client,
+                volume['id'], 'available')
+        self.addCleanup(
+            waiters.wait_for_volume_attachment_remove_from_server,
+            self.servers_client, server['id'], volume['id'])
         self.addCleanup(self._detach_volume, server, volume)
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'in-use')
+
         return attachment
 
+    def create_volume_snapshot(self, volume_id, name=None, description=None,
+                               metadata=None, force=False):
+        name = name or data_utils.rand_name(
+            self.__class__.__name__ + '-snapshot')
+        snapshot = self.snapshots_client.create_snapshot(
+            volume_id=volume_id,
+            force=force,
+            display_name=name,
+            description=description,
+            metadata=metadata)['snapshot']
+        self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
+                        snapshot['id'])
+        self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
+        waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                snapshot['id'], 'available')
+        snapshot = self.snapshots_client.show_snapshot(
+            snapshot['id'])['snapshot']
+        return snapshot
+
     def assert_flavor_equal(self, flavor_id, server_flavor):
         """Check whether server_flavor equals to flavor.
 
@@ -607,6 +658,9 @@
             cls.os_admin.availability_zone_client)
         cls.admin_flavors_client = cls.os_admin.flavors_client
         cls.admin_servers_client = cls.os_admin.servers_client
+        cls.image_client = cls.os_admin.image_client_v2
+        cls.admin_assisted_volume_snapshots_client = \
+            cls.os_admin.assisted_volume_snapshots_client
 
     def create_flavor(self, ram, vcpus, disk, name=None,
                       is_public='True', **kwargs):
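The base-class additions above introduce several hooks: a clients parameter on create_test_server, a reboot_server helper, a default volume_type pulled from config, and a create_volume_snapshot helper. The following is a hedged usage sketch only, assuming a test class derived from BaseV2ComputeAdminTest; the class name ExampleVolumeTest and the test method are illustrative, not part of this change:

    # Illustrative sketch; relies only on the helpers added to
    # tempest/api/compute/base.py in this change.
    class ExampleVolumeTest(base.BaseV2ComputeAdminTest):

        def test_example(self):
            # Build the server with the admin client manager instead of
            # os_primary via the new clients parameter; cleanup then uses
            # the same client manager.
            server = self.create_test_server(clients=self.os_admin,
                                             wait_until='ACTIVE')

            # Reboot and wait for ACTIVE using the new helper.
            self.reboot_server(server['id'], type='SOFT')

            # create_volume() now picks up CONF.volume.volume_type when one
            # is configured; attach the volume and snapshot it (force=True
            # because the volume is in use). Both helpers register their
            # own cleanups.
            volume = self.create_volume()
            self.attach_volume(server, volume)
            self.create_volume_snapshot(volume['id'], force=True)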
diff --git a/tempest/api/compute/certificates/test_certificates.py b/tempest/api/compute/certificates/test_certificates.py
index 0e6c016..5917931 100644
--- a/tempest/api/compute/certificates/test_certificates.py
+++ b/tempest/api/compute/certificates/test_certificates.py
@@ -21,6 +21,7 @@
 
 
 class CertificatesV2TestJSON(base.BaseV2ComputeTest):
+    """Test Certificates API"""
 
     @classmethod
     def skip_checks(cls):
@@ -30,10 +31,10 @@
 
     @decorators.idempotent_id('c070a441-b08e-447e-a733-905909535b1b')
     def test_create_root_certificate(self):
-        # create certificates
+        """Test creating root certificate"""
         self.certificates_client.create_certificate()
 
     @decorators.idempotent_id('3ac273d0-92d2-4632-bdfc-afbc21d4606c')
     def test_get_root_certificate(self):
-        # get the root certificate
+        """Test getting root certificate details"""
         self.certificates_client.show_certificate('root')
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 20294e9..9ab75c5 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -18,20 +18,24 @@
 
 
 class FlavorsV2TestJSON(base.BaseV2ComputeTest):
+    """Tests Flavors"""
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('e36c0eaa-dff5-4082-ad1f-3f9a80aa3f59')
     def test_list_flavors(self):
-        # List of all flavors should contain the expected flavor
+        """List of all flavors should contain the expected flavor"""
         flavors = self.flavors_client.list_flavors()['flavors']
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
                              'name': flavor['name']}
+        # description field is added to the response of list_flavors in 2.55
+        if not self.is_requested_microversion_compatible('2.54'):
+            flavor_min_detail.update({'description': flavor['description']})
         self.assertIn(flavor_min_detail, flavors)
 
     @decorators.idempotent_id('6e85fde4-b3cd-4137-ab72-ed5f418e8c24')
     def test_list_flavors_with_detail(self):
-        # Detailed list of all flavors should contain the expected flavor
+        """Detailed list of all flavors should contain the expected flavor"""
         flavors = self.flavors_client.list_flavors(detail=True)['flavors']
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         self.assertIn(flavor, flavors)
@@ -39,20 +43,20 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('1f12046b-753d-40d2-abb6-d8eb8b30cb2f')
     def test_get_flavor(self):
-        # The expected flavor details should be returned
+        """The expected flavor details should be returned"""
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         self.assertEqual(self.flavor_ref, flavor['id'])
 
     @decorators.idempotent_id('8d7691b3-6ed4-411a-abc9-2839a765adab')
     def test_list_flavors_limit_results(self):
-        # Only the expected number of flavors should be returned
+        """Only the expected number of flavors should be returned"""
         params = {'limit': 1}
         flavors = self.flavors_client.list_flavors(**params)['flavors']
         self.assertEqual(1, len(flavors))
 
     @decorators.idempotent_id('b26f6327-2886-467a-82be-cef7a27709cb')
     def test_list_flavors_detailed_limit_results(self):
-        # Only the expected number of flavors (detailed) should be returned
+        """Only the expected number of flavors(detailed) should be returned"""
         params = {'limit': 1}
         flavors = self.flavors_client.list_flavors(detail=True,
                                                    **params)['flavors']
@@ -60,7 +64,7 @@
 
     @decorators.idempotent_id('e800f879-9828-4bd0-8eae-4f17189951fb')
     def test_list_flavors_using_marker(self):
-        # The list of flavors should start from the provided marker
+        """The list of flavors should start from the provided marker"""
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
@@ -71,7 +75,7 @@
 
     @decorators.idempotent_id('6db2f0c0-ddee-4162-9c84-0703d3dd1107')
     def test_list_flavors_detailed_using_marker(self):
-        # The list of flavors should start from the provided marker
+        """The list of flavors should start from the provided marker"""
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
@@ -83,7 +87,7 @@
 
     @decorators.idempotent_id('3df2743e-3034-4e57-a4cb-b6527f6eac79')
     def test_list_flavors_detailed_filter_by_min_disk(self):
-        # The detailed list of flavors should be filtered by disk space
+        """The detailed list of flavors should be filtered by disk space"""
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
@@ -94,7 +98,7 @@
 
     @decorators.idempotent_id('09fe7509-b4ee-4b34-bf8b-39532dc47292')
     def test_list_flavors_detailed_filter_by_min_ram(self):
-        # The detailed list of flavors should be filtered by RAM
+        """The detailed list of flavors should be filtered by RAM"""
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
@@ -105,7 +109,7 @@
 
     @decorators.idempotent_id('10645a4d-96f5-443f-831b-730711e11dd4')
     def test_list_flavors_filter_by_min_disk(self):
-        # The list of flavors should be filtered by disk space
+        """The list of flavors should be filtered by disk space"""
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
@@ -115,7 +119,7 @@
 
     @decorators.idempotent_id('935cf550-e7c8-4da6-8002-00f92d5edfaa')
     def test_list_flavors_filter_by_min_ram(self):
-        # The list of flavors should be filtered by RAM
+        """The list of flavors should be filtered by RAM"""
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_id = flavor['id']
 
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index 235049a..5d6a7d7 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -13,10 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import io
 import random
 
-import six
-
 from tempest.api.compute import base
 from tempest.common import image as common_image
 from tempest.common import utils
@@ -44,7 +43,7 @@
             CONF.compute.flavor_ref)['flavor']
         min_img_ram = flavor['ram'] + 1
         size = random.randint(1024, 4096)
-        image_file = six.BytesIO(data_utils.random_bytes(size))
+        image_file = io.BytesIO(data_utils.random_bytes(size))
         params = {
             'name': data_utils.rand_name('image'),
             'container_format': CONF.image.container_formats[0],
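The only functional change in this file is the switch from six.BytesIO to the standard library's io.BytesIO, which is what six.BytesIO aliases; a trivial sketch of the replacement, with an illustrative read just to show the object behaves identically:

    import io

    # io.BytesIO provides the same in-memory binary file object that
    # six.BytesIO exposed, so call sites only need the import changed.
    image_file = io.BytesIO(b'*' * 1024)
    assert image_file.read(4) == b'****'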
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions.py b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
index 2adc482..6097bbc 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions.py
@@ -25,13 +25,13 @@
 
 
 class FloatingIPsTestJSON(base.BaseFloatingIPsTest):
+    """Test floating ips API with compute microversion less than 2.36"""
 
     max_microversion = '2.35'
 
     @decorators.idempotent_id('f7bfb946-297e-41b8-9e8c-aba8e9bb5194')
     def test_allocate_floating_ip(self):
-        # Positive test:Allocation of a new floating IP to a project
-        # should be successful
+        """Test allocating a floating ip to a project"""
         body = self.client.create_floating_ip(
             pool=CONF.network.floating_network_name)['floating_ip']
         floating_ip_id_allocated = body['id']
@@ -45,8 +45,7 @@
 
     @decorators.idempotent_id('de45e989-b5ca-4a9b-916b-04a52e7bbb8b')
     def test_delete_floating_ip(self):
-        # Positive test:Deletion of valid floating IP from project
-        # should be successful
+        """Test deleting a valid floating ip from project"""
         # Creating the floating IP that is to be deleted in this method
         floating_ip_body = self.client.create_floating_ip(
             pool=CONF.network.floating_network_name)['floating_ip']
@@ -59,6 +58,7 @@
 
 
 class FloatingIPsAssociationTestJSON(base.BaseFloatingIPsTest):
+    """Test floating ips association with microversion less than 2.44"""
 
     max_microversion = '2.43'
 
@@ -80,9 +80,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_associate_disassociate_floating_ip(self):
-        # Positive test:Associate and disassociate the provided floating IP
-        # to a specific server should be successful
-
+        """Test associate/disassociate floating ip to a server"""
         # Association of floating IP to fixed IP address
         self.client.associate_floating_ip_to_server(
             self.floating_ip,
@@ -102,6 +100,12 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_associate_already_associated_floating_ip(self):
+        """Test associating an already associated floating ip
+
+        First associate a floating ip to server1, then associate the same
+        floating ip to server2; the floating ip ends up associated with
+        server2 and is no longer associated with server1.
+        """
         # positive test:Association of an already associated floating IP
         # to specific server should change the association of the Floating IP
         # Create server so as to use for Multiple association
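A hedged sketch of the re-association behaviour described in the docstring above, assuming (as in this test class) that self.client is the compute floating IPs client and that self.floating_ip and self.server_id are prepared during resource setup; the method name and server2 are placeholders:

    # Sketch only; self.client, self.floating_ip and self.server_id are
    # assumed to be set up by the test class as in this file.
    def _reassociation_sketch(self):
        server2 = self.create_test_server(wait_until='ACTIVE')

        # Associate the floating IP with the first server ...
        self.client.associate_floating_ip_to_server(self.floating_ip,
                                                    self.server_id)
        # ... then with the second one: the association simply moves to
        # server2 rather than being rejected.
        self.client.associate_floating_ip_to_server(self.floating_ip,
                                                    server2['id'])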
diff --git a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
index 9257458..e99e218 100644
--- a/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
+++ b/tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py
@@ -25,6 +25,7 @@
 
 
 class FloatingIPsNegativeTestJSON(base.BaseFloatingIPsTest):
+    """Test floating ips API with compute microversion less than 2.36"""
 
     max_microversion = '2.35'
 
@@ -46,8 +47,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6e0f059b-e4dd-48fb-8207-06e3bba5b074')
     def test_allocate_floating_ip_from_nonexistent_pool(self):
-        # Negative test:Allocation of a new floating IP from a nonexistent_pool
-        # to a project should fail
+        """Test allocating floating ip from non existent pool should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.create_floating_ip,
                           pool="non_exist_pool")
@@ -55,15 +55,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ae1c55a8-552b-44d4-bfb6-2a115a15d0ba')
     def test_delete_nonexistent_floating_ip(self):
-        # Negative test:Deletion of a nonexistent floating IP
-        # from project should fail
-
+        """Test deleting non existent floating ip should fail"""
         # Deleting the non existent floating IP
         self.assertRaises(lib_exc.NotFound, self.client.delete_floating_ip,
                           self.non_exist_id)
 
 
 class FloatingIPsAssociationNegativeTestJSON(base.BaseFloatingIPsTest):
+    """Test floating ips API with compute microversion less than 2.44"""
 
     max_microversion = '2.43'
 
@@ -76,8 +75,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('595fa616-1a71-4670-9614-46564ac49a4c')
     def test_associate_nonexistent_floating_ip(self):
-        # Negative test:Association of a non existent floating IP
-        # to specific server should fail
+        """Test associating non existent floating ip to server should fail"""
         # Associating non existent floating IP
         self.assertRaises(lib_exc.NotFound,
                           self.client.associate_floating_ip_to_server,
@@ -86,7 +84,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0a081a66-e568-4e6b-aa62-9587a876dca8')
     def test_dissociate_nonexistent_floating_ip(self):
-        # Negative test:Dissociation of a non existent floating IP should fail
+        """Test dissociating non existent floating ip should fail"""
         # Dissociating non existent floating IP
         self.assertRaises(lib_exc.NotFound,
                           self.client.disassociate_floating_ip_from_server,
@@ -95,7 +93,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('804b4fcb-bbf5-412f-925d-896672b61eb3')
     def test_associate_ip_to_server_without_passing_floating_ip(self):
-        # Negative test:Association of empty floating IP to specific server
+        """Test associating empty floating ip to server should fail"""
         # should raise NotFound or BadRequest(In case of Nova V2.1) exception.
         self.assertRaises((lib_exc.NotFound, lib_exc.BadRequest),
                           self.client.associate_floating_ip_to_server,
@@ -106,10 +104,13 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_associate_ip_to_server_with_floating_ip(self):
-        # The VM have one port
-        # Associate floating IP A to the VM
-        # Associate floating IP B which is from same pool with floating IP A
-        # to the VM, should raise BadRequest exception
+        """Test associating floating ip to server already with floating ip
+
+        1. The VM has one port
+        2. Associate floating IP A to the VM
+        3. Associate floating IP B, which is from the same pool as floating
+           IP A, to the VM; this should raise a BadRequest exception
+        """
         body = self.client.create_floating_ip(
             pool=CONF.network.public_network_id)['floating_ip']
         self.addCleanup(self.client.delete_floating_ip, body['id'])
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips.py b/tempest/api/compute/floating_ips/test_list_floating_ips.py
index 944f798..6bfee95 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips.py
@@ -21,6 +21,7 @@
 
 
 class FloatingIPDetailsTestJSON(base.BaseFloatingIPsTest):
+    """Test floating ip details with compute microversion less than 2.36"""
 
     max_microversion = '2.35'
 
@@ -37,7 +38,7 @@
 
     @decorators.idempotent_id('16db31c3-fb85-40c9-bbe2-8cf7b67ff99f')
     def test_list_floating_ips(self):
-        # Positive test:Should return the list of floating IPs
+        """Test listing floating ips"""
         body = self.client.list_floating_ips()['floating_ips']
         floating_ips = body
         self.assertNotEmpty(floating_ips,
@@ -47,7 +48,7 @@
 
     @decorators.idempotent_id('eef497e0-8ff7-43c8-85ef-558440574f84')
     def test_get_floating_ip_details(self):
-        # Positive test:Should be able to GET the details of floatingIP
+        """Test getting floating ip details"""
         # Creating a floating IP for which details are to be checked
         body = self.client.create_floating_ip(
             pool=CONF.network.floating_network_name)['floating_ip']
@@ -68,7 +69,7 @@
 
     @decorators.idempotent_id('df389fc8-56f5-43cc-b290-20eda39854d3')
     def test_list_floating_ip_pools(self):
-        # Positive test:Should return the list of floating IP Pools
+        """Test listing floating ip pools"""
         floating_ip_pools = self.pools_client.list_floating_ip_pools()
         self.assertNotEmpty(floating_ip_pools['floating_ip_pools'],
                             "Expected floating IP Pools. Got zero.")
diff --git a/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py b/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
index d69248c..aa0320d 100644
--- a/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
+++ b/tempest/api/compute/floating_ips/test_list_floating_ips_negative.py
@@ -23,14 +23,18 @@
 
 
 class FloatingIPDetailsNegativeTestJSON(base.BaseFloatingIPsTest):
+    """Negative tests of floating ip detail
+
+    Negative tests of floating ip detail with compute microversion less
+    than 2.36.
+    """
 
     max_microversion = '2.35'
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7ab18834-4a4b-4f28-a2c5-440579866695')
     def test_get_nonexistent_floating_ip_details(self):
-        # Negative test:Should not be able to GET the details
-        # of non-existent floating IP
+        """Test getting non existent floating ip should fail"""
         # Creating a non-existent floatingIP id
         if CONF.service_available.neutron:
             non_exist_id = data_utils.rand_uuid()
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 1f3af5f..ece983d 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.api.compute import base
 from tempest.common import image as common_image
@@ -28,6 +28,8 @@
 
 
 class ImagesMetadataTestJSON(base.BaseV2ComputeTest):
+    """Test image metadata with compute microversion less than 2.39"""
+
     max_microversion = '2.38'
 
     @classmethod
@@ -75,7 +77,7 @@
         cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
                                     cls.glance_client.delete_image,
                                     cls.image_id)
-        image_file = six.BytesIO((b'*' * 1024))
+        image_file = io.BytesIO((b'*' * 1024))
         if CONF.image_feature_enabled.api_v1:
             cls.glance_client.update_image(cls.image_id, data=image_file)
         else:
@@ -89,7 +91,10 @@
 
     @decorators.idempotent_id('37ec6edd-cf30-4c53-bd45-ae74db6b0531')
     def test_list_image_metadata(self):
-        # All metadata key/value pairs for an image should be returned
+        """Test listing image metadata
+
+        All metadata key/value pairs for an image should be returned.
+        """
         resp_metadata = self.client.list_image_metadata(self.image_id)
         expected = {'metadata': {
             'os_version': 'value1', 'os_distro': 'value2'}}
@@ -97,7 +102,10 @@
 
     @decorators.idempotent_id('ece7befc-d3ce-42a4-b4be-c3067a418c29')
     def test_set_image_metadata(self):
-        # The metadata for the image should match the new values
+        """Test setting image metadata
+
+        The metadata for the image should match the new values.
+        """
         req_metadata = {'os_version': 'value2', 'architecture': 'value3'}
         self.client.set_image_metadata(self.image_id,
                                        req_metadata)
@@ -108,7 +116,10 @@
 
     @decorators.idempotent_id('7b491c11-a9d5-40fe-a696-7f7e03d3fea2')
     def test_update_image_metadata(self):
-        # The metadata for the image should match the updated values
+        """Test updating image medata
+
+        The metadata for the image should match the updated values.
+        """
         req_metadata = {'os_version': 'alt1', 'architecture': 'value3'}
         self.client.update_image_metadata(self.image_id,
                                           req_metadata)
@@ -122,15 +133,21 @@
 
     @decorators.idempotent_id('4f5db52f-6685-4c75-b848-f4bb363f9aa6')
     def test_get_image_metadata_item(self):
-        # The value for a specific metadata key should be returned
+        """Test getting image metadata item
+
+        The value for a specific metadata key should be returned.
+        """
         meta = self.client.show_image_metadata_item(self.image_id,
                                                     'os_distro')['meta']
         self.assertEqual('value2', meta['os_distro'])
 
     @decorators.idempotent_id('f2de776a-4778-4d90-a5da-aae63aee64ae')
     def test_set_image_metadata_item(self):
-        # The value provided for the given meta item should be set for
-        # the image
+        """Test setting image metadata item
+
+        The value provided for the given meta item should be set for
+        the image.
+        """
         meta = {'os_version': 'alt'}
         self.client.set_image_metadata_item(self.image_id,
                                             'os_version', meta)
@@ -140,7 +157,10 @@
 
     @decorators.idempotent_id('a013796c-ba37-4bb5-8602-d944511def14')
     def test_delete_image_metadata_item(self):
-        # The metadata value/key pair should be deleted from the image
+        """Test deleting image metadata item
+
+        The metadata value/key pair should be deleted from the image.
+        """
         self.client.delete_image_metadata_item(self.image_id,
                                                'os_version')
         resp_metadata = self.client.list_image_metadata(self.image_id)
diff --git a/tempest/api/compute/images/test_image_metadata_negative.py b/tempest/api/compute/images/test_image_metadata_negative.py
index 407fb08..b9806c7 100644
--- a/tempest/api/compute/images/test_image_metadata_negative.py
+++ b/tempest/api/compute/images/test_image_metadata_negative.py
@@ -20,6 +20,11 @@
 
 
 class ImagesMetadataNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of image metadata
+
+    Negative tests of image metadata with compute microversion less than 2.39.
+    """
+
     max_microversion = '2.38'
 
     @classmethod
@@ -30,15 +35,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('94069db2-792f-4fa8-8bd3-2271a6e0c095')
     def test_list_nonexistent_image_metadata(self):
-        # Negative test: List on nonexistent image
-        # metadata should not happen
+        """Test listing metadata of a non existence image should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.list_image_metadata,
                           data_utils.rand_uuid())
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a403ef9e-9f95-427c-b70a-3ce3388796f1')
     def test_update_nonexistent_image_metadata(self):
-        # Negative test:An update should not happen for a non-existent image
+        """Test updating metadata of a non existence image should fail"""
         meta = {'os_distro': 'alt1', 'os_version': 'alt2'}
         self.assertRaises(lib_exc.NotFound,
                           self.client.update_image_metadata,
@@ -47,7 +51,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('41ae052c-6ee6-405c-985e-5712393a620d')
     def test_get_nonexistent_image_metadata_item(self):
-        # Negative test: Get on non-existent image should not happen
+        """Test getting metadata of a non existence image should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.show_image_metadata_item,
                           data_utils.rand_uuid(), 'os_version')
@@ -55,7 +59,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('dc64f2ce-77e8-45b0-88c8-e15041d08eaf')
     def test_set_nonexistent_image_metadata(self):
-        # Negative test: Metadata should not be set to a non-existent image
+        """Test setting metadata of a non existence image should fail"""
         meta = {'os_distro': 'alt1', 'os_version': 'alt2'}
         self.assertRaises(lib_exc.NotFound, self.client.set_image_metadata,
                           data_utils.rand_uuid(), meta)
@@ -63,8 +67,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('2154fd03-ab54-457c-8874-e6e3eb56e9cf')
     def test_set_nonexistent_image_metadata_item(self):
-        # Negative test: Metadata item should not be set to a
-        # nonexistent image
+        """Test setting metadata item of a non existence image should fail"""
         meta = {'os_distro': 'alt'}
         self.assertRaises(lib_exc.NotFound,
                           self.client.set_image_metadata_item,
@@ -74,8 +77,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('848e157f-6bcf-4b2e-a5dd-5124025a8518')
     def test_delete_nonexistent_image_metadata_item(self):
-        # Negative test: Shouldn't be able to delete metadata
-        # item from non-existent image
+        """Test deleting metadata item of a non existence image should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_image_metadata_item,
                           data_utils.rand_uuid(), 'os_distro')
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index eef2781..91ce1f9 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -25,6 +25,9 @@
 
 
 class ImagesTestJSON(base.BaseV2ComputeTest):
+    """Test server images"""
+
+    create_default_network = True
 
     @classmethod
     def skip_checks(cls):
@@ -47,6 +50,7 @@
 
     @decorators.idempotent_id('aa06b52b-2db5-4807-b218-9441f75d74e3')
     def test_delete_saving_image(self):
+        """Test deleting server image while it is in 'SAVING' state"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.servers_client.delete_server, server['id'])
         # wait for server active to avoid conflict when deleting server
@@ -73,6 +77,7 @@
 
     @decorators.idempotent_id('aaacd1d0-55a2-4ce8-818a-b5439df8adc9')
     def test_create_image_from_stopped_server(self):
+        """Test creating server image from stopped server"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.servers_client.stop_server(server['id'])
         waiters.wait_for_server_status(self.servers_client,
@@ -90,6 +95,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_create_image_from_paused_server(self):
+        """Test creating server image from paused server"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.servers_client.pause_server(server['id'])
         waiters.wait_for_server_status(self.servers_client,
@@ -108,6 +114,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                           'Suspend is not available.')
     def test_create_image_from_suspended_server(self):
+        """Test creating server image from suspended server"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.servers_client.suspend_server(server['id'])
         waiters.wait_for_server_status(self.servers_client,
diff --git a/tempest/api/compute/images/test_images_negative.py b/tempest/api/compute/images/test_images_negative.py
index 2400348..5ff2a6a 100644
--- a/tempest/api/compute/images/test_images_negative.py
+++ b/tempest/api/compute/images/test_images_negative.py
@@ -42,11 +42,12 @@
 
 
 class ImagesNegativeTestJSON(ImagesNegativeTestBase):
+    """Negative tests of server image"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6cd5a89d-5b47-46a7-93bc-3916f0d84973')
     def test_create_image_from_deleted_server(self):
-        # An image should not be created if the server instance is removed
+        """Check server image should not be created if the server is removed"""
         server = self.create_test_server(wait_until='ACTIVE')
 
         # Delete server before trying to create image
@@ -61,7 +62,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('82c5b0c4-9dbd-463c-872b-20c4755aae7f')
     def test_create_image_from_invalid_server(self):
-        # An image should not be created with invalid server id
+        """Check server image should not be created with invalid server id"""
         # Create a new image with invalid server id
         meta = {'image_type': 'test'}
         self.assertRaises(lib_exc.NotFound, self.create_image_from_server,
@@ -70,7 +71,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ec176029-73dc-4037-8d72-2e4ff60cf538')
     def test_create_image_specify_uuid_35_characters_or_less(self):
-        # Return an error if Image ID passed is 35 characters or less
+        """Check server image should not be created for invalid server id
+
+        Return an error if server id passed is 35 characters or less
+        """
         snapshot_name = data_utils.rand_name('test-snap')
         test_uuid = ('a' * 35)
         self.assertRaises(lib_exc.NotFound, self.client.create_image,
@@ -79,7 +83,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('36741560-510e-4cc2-8641-55fe4dfb2437')
     def test_create_image_specify_uuid_37_characters_or_more(self):
-        # Return an error if Image ID passed is 37 characters or more
+        """Check server image should not be created for invalid server id
+
+        Return an error if server id passed is 37 characters or more
+        """
         snapshot_name = data_utils.rand_name('test-snap')
         test_uuid = ('a' * 37)
         self.assertRaises(lib_exc.NotFound, self.client.create_image,
@@ -87,20 +94,23 @@
 
 
 class ImagesDeleteNegativeTestJSON(ImagesNegativeTestBase):
+    """Negative tests of server image
+
+    Negative tests of server image with compute microversion less than 2.36.
+    """
     max_microversion = '2.35'
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('381acb65-785a-4942-94ce-d8f8c84f1f0f')
     def test_delete_image_with_invalid_image_id(self):
-        # An image should not be deleted with invalid image id
+        """Check an image should not be deleted with invalid image id"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                           data_utils.rand_name('invalid'))
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('137aef61-39f7-44a1-8ddf-0adf82511701')
     def test_delete_non_existent_image(self):
-        # Return an error while trying to delete a non-existent image
-
+        """Check trying to delete a non-existent image should fail"""
         non_existent_image_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                           non_existent_image_id)
@@ -108,13 +118,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e6e41425-af5c-4fe6-a4b5-7b7b963ffda5')
     def test_delete_image_blank_id(self):
-        # Return an error while trying to delete an image with blank Id
+        """Check trying to delete an image with blank id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_image, '')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('924540c3-f1f1-444c-8f58-718958b6724e')
     def test_delete_image_non_hex_string_id(self):
-        # Return an error while trying to delete an image with non hex id
+        """Check trying to delete an image with non hex id should fail"""
         invalid_image_id = data_utils.rand_uuid()[:-1] + "j"
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                           invalid_image_id)
@@ -122,13 +132,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('68e2c175-bd26-4407-ac0f-4ea9ce2139ea')
     def test_delete_image_negative_image_id(self):
-        # Return an error while trying to delete an image with negative id
+        """Check trying to delete an image with negative id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_image, -1)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b340030d-82cd-4066-a314-c72fb7c59277')
     def test_delete_image_with_id_over_character_limit(self):
-        # Return an error while trying to delete image with id over limit
+        """Check trying to delete image with id over limit should fail"""
         invalid_image_id = data_utils.rand_uuid() + "1"
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                           invalid_image_id)
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index b811421..23f8326 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -22,6 +22,7 @@
 
 
 class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
+    """Test server images API"""
 
     @classmethod
     def resource_setup(cls):
@@ -54,6 +55,7 @@
 
     @decorators.idempotent_id('3731d080-d4c5-4872-b41a-64d0d0021314')
     def test_create_delete_image(self):
+        """Test create/delete server image"""
         if self.is_requested_microversion_compatible('2.35'):
             MIN_DISK = 'minDisk'
             MIN_RAM = 'minRam'
@@ -93,6 +95,7 @@
 
     @decorators.idempotent_id('3b7c6fe4-dfe7-477c-9243-b06359db51e6')
     def test_create_image_specify_multibyte_character_image_name(self):
+        """Test creating server image with multibyte character image name"""
         # prefix character is:
         # http://unicode.org/cldr/utility/character.jsp?a=20A1
 
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 512c9d2..275a26f 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -30,6 +30,9 @@
 
 
 class ImagesOneServerNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of server images"""
+
+    create_default_network = True
 
     def tearDown(self):
         """Terminate test instances created after a test is executed."""
@@ -86,7 +89,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('55d1d38c-dd66-4933-9c8e-7d92aeb60ddc')
     def test_create_image_specify_invalid_metadata(self):
-        # Return an error when creating image with invalid metadata
+        """Test creating server image with invalid metadata should fail"""
         meta = {'': ''}
         self.assertRaises(lib_exc.BadRequest, self.create_image_from_server,
                           self.server_id, metadata=meta)
@@ -94,7 +97,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3d24d11f-5366-4536-bd28-cff32b748eca')
     def test_create_image_specify_metadata_over_limits(self):
-        # Return an error when creating image with meta data over 255 chars
+        """Test creating server image with metadata over 255 should fail"""
         meta = {'a' * 256: 'b' * 256}
         self.assertRaises(lib_exc.BadRequest, self.create_image_from_server,
                           self.server_id, metadata=meta)
@@ -102,28 +105,40 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0460efcf-ee88-4f94-acef-1bf658695456')
     def test_create_second_image_when_first_image_is_being_saved(self):
-        # Disallow creating another image when first image is being saved
+        """Test creating another server image when first image is being saved
 
-        # Create first snapshot
-        image = self.create_image_from_server(self.server_id)
-        self.addCleanup(self._reset_server)
+        Creating another server image when first image is being saved is
+        not allowed.
+        """
+        try:
+            # Create first snapshot
+            image = self.create_image_from_server(self.server_id)
+            self.addCleanup(self._reset_server)
 
-        # Create second snapshot
-        self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
-                          self.server_id)
+            # Create second snapshot
+            self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
+                              self.server_id)
 
-        if api_version_utils.compare_version_header_to_response(
-            "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
-            image_id = image['image_id']
-        else:
-            image_id = data_utils.parse_image_id(image.response['location'])
-        self.client.delete_image(image_id)
+            if api_version_utils.compare_version_header_to_response(
+                "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+                image_id = image['image_id']
+            else:
+                image_id = data_utils.parse_image_id(
+                    image.response['location'])
+            self.client.delete_image(image_id)
+
+        except lib_exc.TimeoutException as ex:
+            # The test could not capture the image in the 'SAVING' state.
+            # If the timeout is reached the image is no longer in the
+            # 'SAVING' state, and this test case has no other state
+            # transition in scope to verify, so skip the test rather
+            # than fail it.
+            raise self.skipException("This test is skipped because " + str(ex))
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('084f0cbc-500a-4963-8a4e-312905862581')
     def test_create_image_specify_name_over_character_limit(self):
-        # Return an error if snapshot name over 255 characters is passed
-
+        """Test creating server image with image name over 255 should fail"""
         snapshot_name = ('a' * 256)
         self.assertRaises(lib_exc.BadRequest,
                           self.compute_images_client.create_image,
@@ -132,8 +147,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0894954d-2db2-4195-a45b-ffec0bc0187e')
     def test_delete_image_that_is_not_yet_active(self):
-        # Return an error while trying to delete an image what is creating
-
+        """Test deleting a non-active server image should fail"""
         image = self.create_image_from_server(self.server_id)
         if api_version_utils.compare_version_header_to_response(
             "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 2ac7de3..15b8a00 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -15,7 +15,7 @@
 
 import time
 
-import six
+import io
 import testtools
 
 from tempest.api.compute import base
@@ -31,6 +31,8 @@
 
 
 class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
+    """Test listing server images with compute microversion less than 2.36"""
+
     max_microversion = '2.35'
 
     @classmethod
@@ -83,7 +85,7 @@
             # Wait 1 second between creation and upload to ensure a delta
             # between created_at and updated_at.
             time.sleep(1)
-            image_file = six.BytesIO((b'*' * 1024))
+            image_file = io.BytesIO((b'*' * 1024))
             if CONF.image_feature_enabled.api_v1:
                 cls.glance_client.update_image(image_id, data=image_file)
             else:
@@ -129,8 +131,11 @@
 
     @decorators.idempotent_id('a3f5b513-aeb3-42a9-b18e-f091ef73254d')
     def test_list_images_filter_by_status(self):
-        # The list of images should contain only images with the
-        # provided status
+        """Test listing server images filtered by image status
+
+        The list of images should contain only images with the
+        provided image status.
+        """
         params = {'status': 'ACTIVE'}
         images = self.client.list_images(**params)['images']
 
@@ -140,8 +145,11 @@
 
     @decorators.idempotent_id('33163b73-79f5-4d07-a7ea-9213bcc468ff')
     def test_list_images_filter_by_name(self):
-        # List of all images should contain the expected images filtered
-        # by name
+        """Test listing server images filtered by image name
+
+        The list of images should contain only images with the
+        provided image name.
+        """
         params = {'name': self.image1['name']}
         images = self.client.list_images(**params)['images']
 
@@ -153,7 +161,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_server_id(self):
-        # The images should contain images filtered by server id
+        """Test listing images filtered by server id
+
+        The list of images should contain only images with the
+        provided server id.
+        """
         params = {'server': self.server1['id']}
         images = self.client.list_images(**params)['images']
 
@@ -169,7 +181,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_server_ref(self):
-        # The list of servers should be filtered by server ref
+        """Test listing images filtered by server link href
+
+        The list of images should contain only images with the
+        provided server link href.
+        """
         server_links = self.server2['links']
 
         # Try all server link types
@@ -188,7 +204,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_type(self):
-        # The list of servers should be filtered by image type
+        """Test listing images filtered by image type
+
+        The list of images should contain only images with the
+        provided image type.
+        """
         params = {'type': 'snapshot'}
         images = self.client.list_images(**params)['images']
 
@@ -202,13 +222,22 @@
 
     @decorators.idempotent_id('3a484ca9-67ba-451e-b494-7fcf28d32d62')
     def test_list_images_limit_results(self):
-        # Verify only the expected number of results are returned
+        """Test listing images with limited count
+
+        If we use limit=1 when listing images, then only 1 image should be
+        returned.
+        """
         params = {'limit': '1'}
         images = self.client.list_images(**params)['images']
         self.assertEqual(1, len([x for x in images if 'id' in x]))
 
     @decorators.idempotent_id('18bac3ae-da27-436c-92a9-b22474d13aab')
     def test_list_images_filter_by_changes_since(self):
+        """Test listing images filtered by changes-since
+
+        The list of images should contain only images updated since the
+        provided changes-since value.
+        """
         # Verify only updated images are returned in the detailed list
 
         # Becoming ACTIVE will modify the updated time
@@ -220,8 +249,11 @@
 
     @decorators.idempotent_id('9b0ea018-6185-4f71-948a-a123a107988e')
     def test_list_images_with_detail_filter_by_status(self):
-        # Detailed list of all images should only contain images
-        # with the provided status
+        """Test listing server images details filtered by image status
+
+        The list of images should contain only images with the
+        provided image status.
+        """
         params = {'status': 'ACTIVE'}
         images = self.client.list_images(detail=True, **params)['images']
 
@@ -231,8 +263,11 @@
 
     @decorators.idempotent_id('644ea267-9bd9-4f3b-af9f-dffa02396a17')
     def test_list_images_with_detail_filter_by_name(self):
-        # Detailed list of all images should contain the expected
-        # images filtered by name
+        """Test listing server images details filtered by image name
+
+        The list of images should contain only images with the
+        provided image name.
+        """
         params = {'name': self.image1['name']}
         images = self.client.list_images(detail=True, **params)['images']
 
@@ -242,8 +277,11 @@
 
     @decorators.idempotent_id('ba2fa9a9-b672-47cc-b354-3b4c0600e2cb')
     def test_list_images_with_detail_limit_results(self):
-        # Verify only the expected number of results (with full details)
-        # are returned
+        """Test listing images details with limited count
+
+        If we use limit=1 when listing images with full details, then only 1
+        image should be returned.
+        """
         params = {'limit': '1'}
         images = self.client.list_images(detail=True, **params)['images']
         self.assertEqual(1, len(images))
@@ -252,7 +290,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_with_detail_filter_by_server_ref(self):
-        # Detailed list of servers should be filtered by server ref
+        """Test listing images details filtered by server link href
+
+        The list of images should contain only images with the
+        provided server link href.
+        """
         server_links = self.server2['links']
 
         # Try all server link types
@@ -271,7 +313,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_with_detail_filter_by_type(self):
-        # The detailed list of servers should be filtered by image type
+        """Test listing images details filtered by image type
+
+        The list of images should contain only images with the
+        provided image type.
+        """
         params = {'type': 'snapshot'}
         images = self.client.list_images(detail=True, **params)['images']
         self.client.show_image(self.image_ref)
@@ -286,8 +332,11 @@
 
     @decorators.idempotent_id('7d439e18-ac2e-4827-b049-7e18004712c4')
     def test_list_images_with_detail_filter_by_changes_since(self):
-        # Verify an update image is returned
+        """Test listing images details filtered by changes-since
 
+        The list of images should contain only images updated since the
+        provided changes-since value.
+        """
         # Becoming ACTIVE will modify the updated time
         # Filter by the image's created time
         params = {'changes-since': self.image1['created']}
diff --git a/tempest/api/compute/images/test_list_image_filters_negative.py b/tempest/api/compute/images/test_list_image_filters_negative.py
index 81c59f9..f77da4b 100644
--- a/tempest/api/compute/images/test_list_image_filters_negative.py
+++ b/tempest/api/compute/images/test_list_image_filters_negative.py
@@ -22,6 +22,12 @@
 
 
 class ListImageFiltersNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of listing images using compute images API
+
+    Negative tests of listing images using compute images API with
+    microversion less than 2.36.
+    """
+
     max_microversion = '2.35'
 
     @classmethod
@@ -39,7 +45,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('391b0440-432c-4d4b-b5da-c5096aa247eb')
     def test_get_nonexistent_image(self):
-        # Check raises a NotFound
+        """Test getting a non existent image should fail"""
         nonexistent_image = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.show_image,
                           nonexistent_image)
diff --git a/tempest/api/compute/images/test_list_images.py b/tempest/api/compute/images/test_list_images.py
index cbb65bb..4dc23a7 100644
--- a/tempest/api/compute/images/test_list_images.py
+++ b/tempest/api/compute/images/test_list_images.py
@@ -21,6 +21,8 @@
 
 
 class ListImagesTestJSON(base.BaseV2ComputeTest):
+    """Test listing server images with compute microversion less than 2.36"""
+
     max_microversion = '2.35'
 
     @classmethod
@@ -37,20 +39,26 @@
 
     @decorators.idempotent_id('490d0898-e12a-463f-aef0-c50156b9f789')
     def test_get_image(self):
-        # Returns the correct details for a single image
+        """Test getting the correct details for a single server image"""
         image = self.client.show_image(self.image_ref)['image']
         self.assertEqual(self.image_ref, image['id'])
 
     @decorators.idempotent_id('fd51b7f4-d4a3-4331-9885-866658112a6f')
     def test_list_images(self):
-        # The list of all images should contain the image
+        """Test listing server images
+
+        The list of all images should contain the image
+        """
         images = self.client.list_images()['images']
         found = [i for i in images if i['id'] == self.image_ref]
         self.assertNotEmpty(found)
 
     @decorators.idempotent_id('9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6')
     def test_list_images_with_detail(self):
-        # Detailed list of all images should contain the expected images
+        """Test listing server images with detail
+
+        Detailed list of all images should contain the expected images
+        """
         images = self.client.list_images(detail=True)['images']
         found = [i for i in images if i['id'] == self.image_ref]
         self.assertNotEmpty(found)
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 66abb21..8df2e84 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -19,11 +19,16 @@
 
 
 class KeyPairsV2TestJSON(base.BaseKeypairTest):
+    """Test keypairs API with compute microversion less than 2.2"""
+
     max_microversion = '2.1'
 
     @decorators.idempotent_id('1d1dbedb-d7a0-432a-9d09-83f543c3c19b')
     def test_keypairs_create_list_delete(self):
-        # Keypairs created should be available in the response list
+        """Test create/list/delete keypairs
+
+        Keypairs created should be available in the response list
+        """
         # Create 3 keypairs
         key_list = list()
         for _ in range(3):
@@ -48,7 +53,7 @@
 
     @decorators.idempotent_id('6c1d3123-4519-4742-9194-622cb1714b7d')
     def test_keypair_create_delete(self):
-        # Keypair should be created, verified and deleted
+        """Test create/delete keypair"""
         k_name = data_utils.rand_name('keypair')
         keypair = self.create_keypair(k_name)
         key_name = keypair['name']
@@ -58,7 +63,7 @@
 
     @decorators.idempotent_id('a4233d5d-52d8-47cc-9a25-e1864527e3df')
     def test_get_keypair_detail(self):
-        # Keypair should be created, Got details by name and deleted
+        """Test getting keypair detail by keypair name"""
         k_name = data_utils.rand_name('keypair')
         self.create_keypair(k_name)
         keypair_detail = self.keypairs_client.show_keypair(k_name)['keypair']
@@ -68,7 +73,7 @@
 
     @decorators.idempotent_id('39c90c6a-304a-49dd-95ec-2366129def05')
     def test_keypair_create_with_pub_key(self):
-        # Keypair should be created with a given public key
+        """Test creating keypair with a given public key"""
         k_name = data_utils.rand_name('keypair')
         pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
                    "Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
diff --git a/tempest/api/compute/keypairs/test_keypairs_negative.py b/tempest/api/compute/keypairs/test_keypairs_negative.py
index 81635ca..40bea3f 100644
--- a/tempest/api/compute/keypairs/test_keypairs_negative.py
+++ b/tempest/api/compute/keypairs/test_keypairs_negative.py
@@ -21,10 +21,12 @@
 
 
 class KeyPairsNegativeTestJSON(base.BaseKeypairTest):
+    """Negative tests of keypairs API"""
+
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('29cca892-46ae-4d48-bc32-8fe7e731eb81')
     def test_keypair_create_with_invalid_pub_key(self):
-        # Keypair should not be created with a non RSA public key
+        """Test keypair should not be created with a non RSA public key"""
         pub_key = "ssh-rsa JUNK nova@ubuntu"
         self.assertRaises(lib_exc.BadRequest,
                           self.create_keypair, pub_key=pub_key)
@@ -32,7 +34,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7cc32e47-4c42-489d-9623-c5e2cb5a2fa5')
     def test_keypair_delete_nonexistent_key(self):
-        # Non-existent key deletion should throw a proper error
+        """Test non-existent key deletion should throw a proper error"""
         k_name = data_utils.rand_name("keypair-non-existent")
         self.assertRaises(lib_exc.NotFound,
                           self.keypairs_client.delete_keypair,
@@ -41,7 +43,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('dade320e-69ca-42a9-ba4a-345300f127e0')
     def test_create_keypair_with_empty_public_key(self):
-        # Keypair should not be created with an empty public key
+        """Test keypair should not be created with an empty public key"""
         pub_key = ' '
         self.assertRaises(lib_exc.BadRequest, self.create_keypair,
                           pub_key=pub_key)
@@ -49,7 +51,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fc100c19-2926-4b9c-8fdc-d0589ee2f9ff')
     def test_create_keypair_when_public_key_bits_exceeds_maximum(self):
-        # Keypair should not be created when public key bits are too long
+        """Test keypair should not be created when public key are too long"""
         pub_key = 'ssh-rsa ' + 'A' * 2048 + ' openstack@ubuntu'
         self.assertRaises(lib_exc.BadRequest, self.create_keypair,
                           pub_key=pub_key)
@@ -57,7 +59,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0359a7f1-f002-4682-8073-0c91e4011b7c')
     def test_create_keypair_with_duplicate_name(self):
-        # Keypairs with duplicate names should not be created
+        """Test keypairs with duplicate names should not be created"""
         k_name = data_utils.rand_name('keypair')
         self.keypairs_client.create_keypair(name=k_name)
         # Now try the same keyname to create another key
@@ -68,14 +70,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1398abe1-4a84-45fb-9294-89f514daff00')
     def test_create_keypair_with_empty_name_string(self):
-        # Keypairs with name being an empty string should not be created
+        """Test keypairs with empty name should not be created"""
         self.assertRaises(lib_exc.BadRequest, self.create_keypair,
                           '')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3faa916f-779f-4103-aca7-dc3538eee1b7')
     def test_create_keypair_with_long_keynames(self):
-        # Keypairs with name longer than 255 chars should not be created
+        """Test keypairs with name longer than 255 should not be created"""
         k_name = 'keypair-'.ljust(260, '0')
         self.assertRaises(lib_exc.BadRequest, self.create_keypair,
                           k_name)
@@ -83,7 +85,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('45fbe5e0-acb5-49aa-837a-ff8d0719db91')
     def test_create_keypair_invalid_name(self):
-        # Keypairs with name being an invalid name should not be created
+        """Test keypairs with an invalid name should not be created"""
         k_name = r'key_/.\@:'
         self.assertRaises(lib_exc.BadRequest, self.create_keypair,
                           k_name)
diff --git a/tempest/api/compute/keypairs/test_keypairs_v22.py b/tempest/api/compute/keypairs/test_keypairs_v22.py
index 1aff262..e229c37 100644
--- a/tempest/api/compute/keypairs/test_keypairs_v22.py
+++ b/tempest/api/compute/keypairs/test_keypairs_v22.py
@@ -18,6 +18,8 @@
 
 
 class KeyPairsV22TestJSON(test_keypairs.KeyPairsV2TestJSON):
+    """Test keypairs API with compute microversion greater than 2.1"""
+
     min_microversion = '2.2'
     max_microversion = 'latest'
 
@@ -43,9 +45,11 @@
 
     @decorators.idempotent_id('8726fa85-7f98-4b20-af9e-f710a4f3391c')
     def test_keypairsv22_create_list_show(self):
+        """Test create/list/show keypair"""
         self._test_keypairs_create_list_show()
 
     @decorators.idempotent_id('89d59d43-f735-441a-abcf-0601727f47b6')
     def test_keypairsv22_create_list_show_with_type(self):
+        """Test create/list/show keypair with keypair type"""
         keypair_type = 'x509'
         self._test_keypairs_create_list_show(keypair_type=keypair_type)
diff --git a/tempest/api/compute/limits/test_absolute_limits.py b/tempest/api/compute/limits/test_absolute_limits.py
index 8c2202e..c729069 100644
--- a/tempest/api/compute/limits/test_absolute_limits.py
+++ b/tempest/api/compute/limits/test_absolute_limits.py
@@ -18,6 +18,11 @@
 
 
 class AbsoluteLimitsTestJSON(base.BaseV2ComputeTest):
+    """Test compute absolute limits
+
+    Test compute absolute limits with compute microversion less than 2.57
+    """
+
     max_microversion = '2.56'
 
     @classmethod
@@ -27,12 +32,17 @@
 
     @decorators.idempotent_id('b54c66af-6ab6-4cf0-a9e5-a0cb58d75e0b')
     def test_absLimits_get(self):
+        """Test getting nova absolute limits"""
         # To check if all limits are present in the response (will be checked
         # by schema)
         self.client.show_limits()
 
 
 class AbsoluteLimitsV257TestJSON(base.BaseV2ComputeTest):
+    """Test compute absolute limits
+
+    Test compute absolute limits with compute microversion greater than 2.56
+    """
     min_microversion = '2.57'
     max_microversion = 'latest'
 
diff --git a/tempest/api/compute/limits/test_absolute_limits_negative.py b/tempest/api/compute/limits/test_absolute_limits_negative.py
index 500638a..de6a9b9 100644
--- a/tempest/api/compute/limits/test_absolute_limits_negative.py
+++ b/tempest/api/compute/limits/test_absolute_limits_negative.py
@@ -20,6 +20,7 @@
 
 
 class AbsoluteLimitsNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of nova absolute limits"""
 
     def setUp(self):
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
@@ -34,7 +35,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('215cd465-d8ae-49c9-bf33-9c911913a5c8')
     def test_max_metadata_exceed_limit(self):
-        # We should not create vm with metadata over maxServerMeta limit
+        """Test creating server with metadata over limit should fail
+
+        We should not create server with metadata over maxServerMeta limit
+        """
         # Get max limit value
         limits = self.client.show_limits()['limits']
         max_meta = limits['absolute']['maxServerMeta']
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 4c99ea6..3c4daf6 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -18,6 +18,10 @@
 
 
 class SecurityGroupRulesTestJSON(base.BaseSecurityGroupsTest):
+    """Test security group rules API
+
+    Test security group rules API with compute microversion less than 2.36.
+    """
 
     @classmethod
     def setup_clients(cls):
@@ -31,16 +35,16 @@
         cls.from_port = 22
         cls.to_port = 22
 
-    def setUp(cls):
-        super(SecurityGroupRulesTestJSON, cls).setUp()
+    def setUp(self):
+        super(SecurityGroupRulesTestJSON, self).setUp()
 
-        from_port = cls.from_port
-        to_port = cls.to_port
+        from_port = self.from_port
+        to_port = self.to_port
         group = {}
         ip_range = {}
-        cls.expected = {
+        self.expected = {
             'parent_group_id': None,
-            'ip_protocol': cls.ip_protocol,
+            'ip_protocol': self.ip_protocol,
             'from_port': from_port,
             'to_port': to_port,
             'ip_range': ip_range,
@@ -55,8 +59,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('850795d7-d4d3-4e55-b527-a774c0123d3a')
     def test_security_group_rules_create(self):
-        # Positive test: Creation of Security Group rule
-        # should be successful
+        """Test creating security group rules"""
         # Creating a Security Group to add rules to it
         security_group = self.create_security_group()
         securitygroup_id = security_group['id']
@@ -72,10 +75,7 @@
 
     @decorators.idempotent_id('7a01873e-3c38-4f30-80be-31a043cfe2fd')
     def test_security_group_rules_create_with_optional_cidr(self):
-        # Positive test: Creation of Security Group rule
-        # with optional argument cidr
-        # should be successful
-
+        """Test creating security group rules with optional field cidr"""
         # Creating a Security Group to add rules to it
         security_group = self.create_security_group()
         parent_group_id = security_group['id']
@@ -94,10 +94,7 @@
 
     @decorators.idempotent_id('7f5d2899-7705-4d4b-8458-4505188ffab6')
     def test_security_group_rules_create_with_optional_group_id(self):
-        # Positive test: Creation of Security Group rule
-        # with optional argument group_id
-        # should be successful
-
+        """Test creating security group rules with optional field group id"""
         # Creating a Security Group to add rules to it
         security_group = self.create_security_group()
         parent_group_id = security_group['id']
@@ -122,8 +119,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('a6154130-5a55-4850-8be4-5e9e796dbf17')
     def test_security_group_rules_list(self):
-        # Positive test: Created Security Group rules should be
-        # in the list of all rules
+        """Test listing security group rules"""
         # Creating a Security Group to add rules to it
         security_group = self.create_security_group()
         securitygroup_id = security_group['id']
@@ -159,7 +155,7 @@
 
     @decorators.idempotent_id('fc5c5acf-2091-43a6-a6ae-e42760e9ffaf')
     def test_security_group_rules_delete_when_peer_group_deleted(self):
-        # Positive test:rule will delete when peer group deleting
+        """Test security group rule gets deleted when peer group is deleted"""
         # Creating a Security Group to add rules to it
         security_group = self.create_security_group()
         sg1_id = security_group['id']
diff --git a/tempest/api/compute/security_groups/test_security_group_rules_negative.py b/tempest/api/compute/security_groups/test_security_group_rules_negative.py
index 8283aae..3d000ca 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules_negative.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules_negative.py
@@ -20,6 +20,11 @@
 
 
 class SecurityGroupRulesNegativeTestJSON(base.BaseSecurityGroupsTest):
+    """Negative tests of security group rules API
+
+    Negative tests of security group rules API with compute microversion
+    less than 2.36.
+    """
 
     @classmethod
     def setup_clients(cls):
@@ -29,8 +34,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1d507e98-7951-469b-82c3-23f1e6b8c254')
     def test_create_security_group_rule_with_non_existent_id(self):
-        # Negative test: Creation of Security Group rule should FAIL
-        # with non existent Parent group id
+        """Test creating security group rule with non existent parent group
+
+        Negative test: Creation of security group rule should fail
+        with non existent parent group id.
+        """
         # Adding rules to the non existent Security Group id
         parent_group_id = self.generate_random_security_group_id()
         ip_protocol = 'tcp'
@@ -45,8 +53,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('2244d7e4-adb7-4ecb-9930-2d77e123ce4f')
     def test_create_security_group_rule_with_invalid_id(self):
-        # Negative test: Creation of Security Group rule should FAIL
-        # with Parent group id which is not integer
+        """Test creating security group rule with invalid parent group id
+
+        Negative test: Creation of security group rule should fail
+        with parent group id which is not integer.
+        """
         # Adding rules to the non int Security Group id
         parent_group_id = data_utils.rand_name('non_int_id')
         ip_protocol = 'tcp'
@@ -61,7 +72,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8bd56d02-3ffa-4d67-9933-b6b9a01d6089')
     def test_create_security_group_rule_duplicate(self):
-        # Negative test: Create Security Group rule duplicate should fail
+        """Test creating duplicate security group rule should fail"""
         # Creating a Security Group to add rule to it
         sg = self.create_security_group()
         # Adding rules to the created Security Group
@@ -85,8 +96,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('84c81249-9f6e-439c-9bbf-cbb0d2cddbdf')
     def test_create_security_group_rule_with_invalid_ip_protocol(self):
-        # Negative test: Creation of Security Group rule should FAIL
-        # with invalid ip_protocol
+        """Test creating security group rule with invalid ip protocol
+
+        Negative test: Creation of security group rule should fail
+        with invalid ip_protocol.
+        """
         # Creating a Security Group to add rule to it
         sg = self.create_security_group()
         # Adding rules to the created Security Group
@@ -104,8 +118,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('12bbc875-1045-4f7a-be46-751277baedb9')
     def test_create_security_group_rule_with_invalid_from_port(self):
-        # Negative test: Creation of Security Group rule should FAIL
-        # with invalid from_port
+        """Test creating security group rule with invalid from_port
+
+        Negative test: Creation of security group rule should fail
+        with invalid from_port.
+        """
         # Creating a Security Group to add rule to it
         sg = self.create_security_group()
         # Adding rules to the created Security Group
@@ -122,8 +139,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ff88804d-144f-45d1-bf59-dd155838a43a')
     def test_create_security_group_rule_with_invalid_to_port(self):
-        # Negative test: Creation of Security Group rule should FAIL
-        # with invalid to_port
+        """Test creating security group rule with invalid to_port
+
+        Negative test: Creation of security group rule should fail
+        with invalid to_port.
+        """
         # Creating a Security Group to add rule to it
         sg = self.create_security_group()
         # Adding rules to the created Security Group
@@ -140,8 +160,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('00296fa9-0576-496a-ae15-fbab843189e0')
     def test_create_security_group_rule_with_invalid_port_range(self):
-        # Negative test: Creation of Security Group rule should FAIL
-        # with invalid port range.
+        """Test creating security group rule with invalid port range
+
+        Negative test: Creation of security group rule should fail
+        with invalid port range.
+        """
         # Creating a Security Group to add rule to it.
         sg = self.create_security_group()
         # Adding a rule to the created Security Group
@@ -158,8 +181,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('56fddcca-dbb8-4494-a0db-96e9f869527c')
     def test_delete_security_group_rule_with_non_existent_id(self):
-        # Negative test: Deletion of Security Group rule should be FAIL
-        # with non existent id
+        """Test deleting non existent security group rule should fail"""
         non_existent_rule_id = self.generate_random_security_group_id()
         self.assertRaises(lib_exc.NotFound,
                           self.rules_client.delete_security_group_rule,
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 62d5bea..a1f3514 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -21,6 +21,7 @@
 
 
 class SecurityGroupsTestJSON(base.BaseSecurityGroupsTest):
+    """Test security groups API with compute microversion less than 2.36"""
 
     @classmethod
     def setup_clients(cls):
@@ -30,7 +31,10 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('eb2b087d-633d-4d0d-a7bd-9e6ba35b32de')
     def test_security_groups_create_list_delete(self):
-        # Positive test:Should return the list of Security Groups
+        """Test create/list/delete security groups
+
+        Positive test: Should return the list of security groups.
+        """
         # Create 3 Security Groups
         security_group_list = []
         for _ in range(3):
@@ -60,9 +64,11 @@
 
     @decorators.idempotent_id('ecc0da4a-2117-48af-91af-993cca39a615')
     def test_security_group_create_get_delete(self):
-        # Security Group should be created, fetched and deleted
-        # with char space between name along with
-        # leading and trailing spaces
+        """Test create/get/delete security group
+
+        Security group should be created, fetched and deleted
+        with a name containing spaces, including leading and trailing spaces.
+        """
         s_name = ' %s ' % data_utils.rand_name('securitygroup ')
         securitygroup = self.create_security_group(name=s_name)
         securitygroup_name = securitygroup['name']
@@ -80,8 +86,11 @@
 
     @decorators.idempotent_id('fe4abc0d-83f5-4c50-ad11-57a1127297a2')
     def test_server_security_groups(self):
-        # Checks that security groups may be added and linked to a server
-        # and not deleted if the server is active.
+        """Test adding security groups to a server
+
+        Checks that security groups may be added and linked to a server
+        and not deleted if the server is active.
+        """
         # Create a couple security groups that we will use
         # for the server resource this test creates
         sg = self.create_security_group()
@@ -100,9 +109,7 @@
                           sg['id'])
 
         # Reboot and add the other security group
-        self.servers_client.reboot_server(server_id, type='HARD')
-        waiters.wait_for_server_status(self.servers_client, server_id,
-                                       'ACTIVE')
+        self.reboot_server(server_id, type='HARD')
         self.servers_client.add_security_group(server_id, name=sg2['name'])
 
         # Check that we are not able to delete the other security
@@ -121,7 +128,7 @@
 
     @decorators.idempotent_id('7d4e1d3c-3209-4d6d-b020-986304ebad1f')
     def test_update_security_groups(self):
-        # Update security group name and description
+        """Test updating security group name and description"""
         # Create a security group
         securitygroup = self.create_security_group()
         securitygroup_id = securitygroup['id']
@@ -139,6 +146,11 @@
 
     @decorators.idempotent_id('79517d60-535a-438f-af3d-e6feab1cbea7')
     def test_list_security_groups_by_server(self):
+        """Test listing security groups by server
+
+        Create security groups and add them to a server, then list security
+        groups by server; the added security groups should be in the list.
+        """
         # Create a couple security groups that we will use
         # for the server resource this test creates
         sg = self.create_security_group()
diff --git a/tempest/api/compute/security_groups/test_security_groups_negative.py b/tempest/api/compute/security_groups/test_security_groups_negative.py
index 9c44bb2..4607112 100644
--- a/tempest/api/compute/security_groups/test_security_groups_negative.py
+++ b/tempest/api/compute/security_groups/test_security_groups_negative.py
@@ -25,6 +25,11 @@
 
 
 class SecurityGroupsNegativeTestJSON(base.BaseSecurityGroupsTest):
+    """Negative tests of security groups API
+
+    Negative tests of security groups API with compute microversion
+    less than 2.36.
+    """
 
     @classmethod
     def setup_clients(cls):
@@ -34,8 +39,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('673eaec1-9b3e-48ed-bdf1-2786c1b9661c')
     def test_security_group_get_nonexistent_group(self):
-        # Negative test:Should not be able to GET the details
-        # of non-existent Security Group
+        """Test getting non existent security group details should fail"""
         non_exist_id = self.generate_random_security_group_id()
         self.assertRaises(lib_exc.NotFound, self.client.show_security_group,
                           non_exist_id)
@@ -45,8 +49,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1759c3cb-b0fc-44b7-86ce-c99236be911d')
     def test_security_group_create_with_invalid_group_name(self):
-        # Negative test: Security Group should not be created with group name
-        # as an empty string/with white spaces/chars more than 255
+        """Test creating security group with invalid group name should fail
+
+        Negative test: Security group should not be created with group name
+        as an empty string, a group name with white spaces, or a group name
+        longer than 255 chars.
+        """
         s_description = data_utils.rand_name('description')
         # Create Security Group with empty string as group name
         self.assertRaises(lib_exc.BadRequest,
@@ -67,9 +75,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('777b6f14-aca9-4758-9e84-38783cfa58bc')
     def test_security_group_create_with_invalid_group_description(self):
-        # Negative test: Security Group should not be created with description
-        # longer than 255 chars. Empty description is allowed by the API
-        # reference, however.
+        """Test creating security group with invalid group description
+
+        Negative test: Security group should not be created with description
+        longer than 255 chars. Empty description is allowed by the API
+        reference, however.
+        """
         s_name = data_utils.rand_name('securitygroup')
         # Create Security Group with group description longer than 255 chars
         s_description = 'description-'.ljust(260, '0')
@@ -82,8 +93,7 @@
                       "Neutron allows duplicate names for security groups")
     @decorators.attr(type=['negative'])
     def test_security_group_create_with_duplicate_name(self):
-        # Negative test:Security Group with duplicate name should not
-        # be created
+        """Test creating security group with duplicate name should fail"""
         s_name = data_utils.rand_name('securitygroup')
         s_description = data_utils.rand_name('description')
         self.create_security_group(name=s_name, description=s_description)
@@ -95,7 +105,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('36a1629f-c6da-4a26-b8b8-55e7e5d5cd58')
     def test_delete_the_default_security_group(self):
-        # Negative test:Deletion of the "default" Security Group should Fail
+        """Test deleting "default" security group should fail"""
         default_security_group_id = None
         body = self.client.list_security_groups()['security_groups']
         for i in range(len(body)):
@@ -110,7 +120,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6727c00b-214c-4f9e-9a52-017ac3e98411')
     def test_delete_nonexistent_security_group(self):
-        # Negative test:Deletion of a non-existent Security Group should fail
+        """Test deleting non existent security group should fail"""
         non_exist_id = self.generate_random_security_group_id()
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_security_group, non_exist_id)
@@ -118,8 +128,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1438f330-8fa4-4aeb-8a94-37c250106d7f')
     def test_delete_security_group_without_passing_id(self):
-        # Negative test:Deletion of a Security Group with out passing ID
-        # should Fail
+        """Test deleting security group passing empty group id should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_security_group, '')
 
@@ -128,7 +137,7 @@
                       "Neutron does not check the security group ID")
     @decorators.attr(type=['negative'])
     def test_update_security_group_with_invalid_sg_id(self):
-        # Update security_group with invalid sg_id should fail
+        """Test updating security group with invalid group id should fail"""
         s_name = data_utils.rand_name('sg')
         s_description = data_utils.rand_name('description')
         # Create a non int sg_id
@@ -142,7 +151,7 @@
                       "Neutron does not check the security group name")
     @decorators.attr(type=['negative'])
     def test_update_security_group_with_invalid_sg_name(self):
-        # Update security_group with invalid sg_name should fail
+        """Test updating security group to invalid group name should fail"""
         securitygroup = self.create_security_group()
         securitygroup_id = securitygroup['id']
         # Update Security Group with group name longer than 255 chars
@@ -156,7 +165,7 @@
                       "Neutron does not check the security group description")
     @decorators.attr(type=['negative'])
     def test_update_security_group_with_invalid_sg_des(self):
-        # Update security_group with invalid sg_des should fail
+        """Test updating security group to invalid description should fail"""
         securitygroup = self.create_security_group()
         securitygroup_id = securitygroup['id']
         # Update Security Group with group description longer than 255 chars
@@ -168,7 +177,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('27edee9c-873d-4da6-a68a-3c256efebe8f')
     def test_update_non_existent_security_group(self):
-        # Update a non-existent Security Group should Fail
+        """Test updating a non existent security group should fail"""
         non_exist_id = self.generate_random_security_group_id()
         s_name = data_utils.rand_name('sg')
         s_description = data_utils.rand_name('description')
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index df8da07..ac18442 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -16,7 +16,6 @@
 import time
 
 from oslo_log import log
-import six
 
 from tempest.api.compute import base
 from tempest.common import compute
@@ -86,15 +85,20 @@
         # apparently not enough? Add cleanup here.
         self.addCleanup(self.delete_server, server['id'])
         self._wait_for_validation(server, validation_resources)
+        try:
+            fip = set([validation_resources['floating_ip']['ip']])
+        except KeyError:
+            fip = ()
         ifs = (self.interfaces_client.list_interfaces(server['id'])
                ['interfaceAttachments'])
         body = waiters.wait_for_interface_status(
             self.interfaces_client, server['id'], ifs[0]['port_id'], 'ACTIVE')
         ifs[0]['port_state'] = body['port_state']
-        return server, ifs
+        return server, ifs, fip
 
 
 class AttachInterfacesTestJSON(AttachInterfacesTestBase):
+    """Test attaching interfaces"""
 
     def wait_for_port_detach(self, port_id):
         """Waits for the port's device_id to be unset.
@@ -226,7 +230,8 @@
     @decorators.idempotent_id('73fe8f02-590d-4bf1-b184-e9ca81065051')
     @utils.services('network')
     def test_create_list_show_delete_interfaces_by_network_port(self):
-        server, ifs = self._create_server_get_interfaces()
+        """Test create/list/show/delete interfaces by network port"""
+        server, ifs, _ = self._create_server_get_interfaces()
         interface_count = len(ifs)
         self.assertGreater(interface_count, 0)
 
@@ -235,7 +240,7 @@
         except lib_exc.BadRequest as e:
             msg = ('Multiple possible networks found, use a Network ID to be '
                    'more specific.')
-            if not CONF.compute.fixed_network_name and six.text_type(e) == msg:
+            if not CONF.compute.fixed_network_name and str(e) == msg:
                 raise
         else:
             ifs.append(iface)
@@ -258,6 +263,7 @@
     @decorators.idempotent_id('d290c06c-f5b3-11e7-8ec8-002293781009')
     @utils.services('network')
     def test_create_list_show_delete_interfaces_by_fixed_ip(self):
+        """Test create/list/show/delete interfaces by fixed ip"""
         # NOTE(zhufl) By default only project that is admin or network owner
         # or project with role advsvc is authorised to create interfaces with
         # fixed-ip, so if we don't create network for each project, do not
@@ -268,7 +274,7 @@
             raise self.skipException("Only owner network supports "
                                      "creating interface by fixed ip.")
 
-        server, ifs = self._create_server_get_interfaces()
+        server, ifs, _ = self._create_server_get_interfaces()
         interface_count = len(ifs)
         self.assertGreater(interface_count, 0)
 
@@ -286,7 +292,7 @@
 
     @decorators.idempotent_id('2f3a0127-95c7-4977-92d2-bc5aec602fb4')
     def test_reassign_port_between_servers(self):
-        """Tests the following:
+        """Tests reassigning port between servers
 
         1. Create a port in Neutron.
         2. Create two servers in Nova.
@@ -339,12 +345,15 @@
 
 
 class AttachInterfacesUnderV243Test(AttachInterfacesTestBase):
+    """Test attaching interfaces with compute microversion less than 2.44"""
+
     max_microversion = '2.43'
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('c7e0e60b-ee45-43d0-abeb-8596fd42a2f9')
     @utils.services('network')
     def test_add_remove_fixed_ip(self):
+        """Test adding and removing fixed ip from server"""
         # NOTE(zhufl) By default only project that is admin or network owner
         # or project with role advsvc is authorised to add interfaces with
         # fixed-ip, so if we don't create network for each project, do not
@@ -354,9 +363,8 @@
                 not CONF.network.shared_physical_network):
             raise self.skipException("Only owner network supports "
                                      "creating interface by fixed ip.")
-
         # Add and Remove the fixed IP to server.
-        server, ifs = self._create_server_get_interfaces()
+        server, ifs, fip = self._create_server_get_interfaces()
         original_interface_count = len(ifs)  # This is the number of ports.
         self.assertGreater(original_interface_count, 0)
         # Get the starting list of IPs on the server.
@@ -369,6 +377,9 @@
         self.assertEqual(1, len(addresses), addresses)  # number of networks
         # Keep track of the original addresses so we can know which IP is new.
         original_ips = [addr['addr'] for addr in list(addresses.values())[0]]
+        # Make sure the floating IP possibly assigned during
+        # server creation is always present in the set of original ips.
+        original_ips = set(original_ips).union(fip)
         original_ip_count = len(original_ips)
         self.assertGreater(original_ip_count, 0, addresses)  # at least 1
         network_id = ifs[0]['net_id']
@@ -376,40 +387,22 @@
         # fixed IP on the same network (and same port since we only have one
         # port).
         self.servers_client.add_fixed_ip(server['id'], networkId=network_id)
-        # Wait for the ips count to increase by one.
 
-        def _get_server_floating_ips():
-            _floating_ips = []
-            _server = self.os_primary.servers_client.show_server(
-                server['id'])['server']
-            for _ip_set in _server['addresses']:
-                for _ip in _server['addresses'][_ip_set]:
-                    if _ip['OS-EXT-IPS:type'] == 'floating':
-                        _floating_ips.append(_ip['addr'])
-            return _floating_ips
-
-        def _wait_for_ip_increase():
+        def _wait_for_ip_change(expected_count):
             _addresses = self.os_primary.servers_client.list_addresses(
                 server['id'])['addresses']
-            _ips = [addr['addr'] for addr in list(_addresses.values())[0]]
-            LOG.debug("Wait for IP increase. All IPs still associated to "
+            _ips = set([addr['addr'] for addr in list(_addresses.values())[0]])
+            # Make sure possible floating ip is always present in the set.
+            _ips = _ips.union(fip)
+            LOG.debug("Wait for change of IPs. All IPs still associated to "
                       "the server %(id)s: %(ips)s",
                       {'id': server['id'], 'ips': _ips})
-            if len(_ips) == original_ip_count + 1:
-                return True
-            elif len(_ips) == original_ip_count:
-                return False
-            # If not, lets remove any floating IP from the list and check again
-            _fips = _get_server_floating_ips()
-            _ips = [_ip for _ip in _ips if _ip not in _fips]
-            LOG.debug("Wait for IP increase. Fixed IPs still associated to "
-                      "the server %(id)s: %(ips)s",
-                      {'id': server['id'], 'ips': _ips})
-            return len(_ips) == original_ip_count + 1
+            return len(_ips) == expected_count
 
+        # Wait for the ips count to increase by one.
         if not test_utils.call_until_true(
-                _wait_for_ip_increase, CONF.compute.build_timeout,
-                CONF.compute.build_interval):
+                _wait_for_ip_change, CONF.compute.build_timeout,
+                CONF.compute.build_interval, original_ip_count + 1):
             raise lib_exc.TimeoutException(
                 'Timed out while waiting for IP count to increase.')
 
@@ -428,26 +421,38 @@
                 break
         self.servers_client.remove_fixed_ip(server['id'], address=fixed_ip)
         # Wait for the interface count to decrease by one.
-
-        def _wait_for_ip_decrease():
-            _addresses = self.os_primary.servers_client.list_addresses(
-                server['id'])['addresses']
-            _ips = [addr['addr'] for addr in list(_addresses.values())[0]]
-            LOG.debug("Wait for IP decrease. All IPs still associated to "
-                      "the server %(id)s: %(ips)s",
-                      {'id': server['id'], 'ips': _ips})
-            if len(_ips) == original_ip_count:
-                return True
-            # If not, lets remove any floating IP from the list and check again
-            _fips = _get_server_floating_ips()
-            _ips = [_ip for _ip in _ips if _ip not in _fips]
-            LOG.debug("Wait for IP decrease. Fixed IPs still associated to "
-                      "the server %(id)s: %(ips)s",
-                      {'id': server['id'], 'ips': _ips})
-            return len(_ips) == original_ip_count
-
         if not test_utils.call_until_true(
-                _wait_for_ip_decrease, CONF.compute.build_timeout,
-                CONF.compute.build_interval):
+                _wait_for_ip_change, CONF.compute.build_timeout,
+                CONF.compute.build_interval, original_ip_count):
             raise lib_exc.TimeoutException(
                 'Timed out while waiting for IP count to decrease.')
+
+
+class AttachInterfacesV270Test(AttachInterfacesTestBase):
+    """Test interface API with microversion greater than 2.69"""
+    min_microversion = '2.70'
+
+    @decorators.idempotent_id('2853f095-8277-4067-92bd-9f10bd4f8e0c')
+    @utils.services('network')
+    def test_create_get_list_interfaces(self):
+        """Test interface API with microversion greater than 2.69
+
+        Checking create, get, list interface APIs response schema.
+        """
+        server = self.create_test_server(wait_until='ACTIVE')
+        try:
+            iface = self.interfaces_client.create_interface(server['id'])[
+                'interfaceAttachment']
+            iface = waiters.wait_for_interface_status(
+                self.interfaces_client, server['id'], iface['port_id'],
+                'ACTIVE')
+        except lib_exc.BadRequest as e:
+            msg = ('Multiple possible networks found, use a Network ID to be '
+                   'more specific.')
+            if not CONF.compute.fixed_network_name and str(e) == msg:
+                raise
+        else:
+            # just to check the response schema
+            self.interfaces_client.show_interface(
+                server['id'], iface['port_id'])
+            self.interfaces_client.list_interfaces(server['id'])
diff --git a/tempest/api/compute/servers/test_availability_zone.py b/tempest/api/compute/servers/test_availability_zone.py
index 36828d6..d239149 100644
--- a/tempest/api/compute/servers/test_availability_zone.py
+++ b/tempest/api/compute/servers/test_availability_zone.py
@@ -27,6 +27,6 @@
 
     @decorators.idempotent_id('a8333aa2-205c-449f-a828-d38c2489bf25')
     def test_get_availability_zone_list_with_non_admin_user(self):
-        # List of availability zone with non-administrator user
+        """List of availability zone with non-administrator user"""
         availability_zone = self.client.list_availability_zones()
         self.assertNotEmpty(availability_zone['availabilityZoneInfo'])
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 4f0dbad..c9aec62 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -27,6 +27,11 @@
 
 
 class ServersTestJSON(base.BaseV2ComputeTest):
+    """Test creating server and verifying the server attributes
+
+    This is to create a server booted from an image with disk_config 'AUTO'
+    """
+
     disk_config = 'AUTO'
     volume_backed = False
 
@@ -62,13 +67,12 @@
             disk_config=disk_config,
             adminPass=cls.password,
             volume_backed=cls.volume_backed)
-        cls.server = (cls.client.show_server(server_initial['id'])
-                      ['server'])
+        cls.server = cls.client.show_server(server_initial['id'])['server']
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
     def test_verify_server_details(self):
-        # Verify the specified server attributes are set correctly
+        """Verify the specified server attributes are set correctly"""
         self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
         # NOTE(maurosr): See http://tools.ietf.org/html/rfc5952 (section 4)
         # Here we compare directly with the canonicalized format.
@@ -86,7 +90,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('9a438d88-10c6-4bcd-8b5b-5b6e25e1346f')
     def test_list_servers(self):
-        # The created server should be in the list of all servers
+        """The created server should be in the list of all servers"""
         body = self.client.list_servers()
         servers = body['servers']
         found = [i for i in servers if i['id'] == self.server['id']]
@@ -94,7 +98,7 @@
 
     @decorators.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
     def test_list_servers_with_detail(self):
-        # The created server should be in the detailed list of all servers
+        """The created server should be in the detailed list of all servers"""
         body = self.client.list_servers(detail=True)
         servers = body['servers']
         found = [i for i in servers if i['id'] == self.server['id']]
@@ -104,8 +108,11 @@
     @testtools.skipUnless(CONF.validation.run_validation,
                           'Instance validation tests are disabled.')
     def test_verify_created_server_vcpus(self):
-        # Verify that the number of vcpus reported by the instance matches
-        # the amount stated by the flavor
+        """The created server should have the same specification as the flavor
+
+        Verify that the number of vcpus reported by the instance matches
+        the amount stated by the flavor
+        """
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         validation_resources = self.get_class_validation_resources(
             self.os_primary)
@@ -123,7 +130,7 @@
     @testtools.skipUnless(CONF.validation.run_validation,
                           'Instance validation tests are disabled.')
     def test_host_name_is_same_as_server_name(self):
-        # Verify the instance host name is the same as the server name
+        """Verify the instance host name is the same as the server name"""
         validation_resources = self.get_class_validation_resources(
             self.os_primary)
         linux_client = remote_client.RemoteClient(
@@ -145,6 +152,10 @@
 
 
 class ServersTestManualDisk(ServersTestJSON):
+    """Test creating server and verifying the server attributes
+
+    This is to create a server booted from an image with disk_config 'MANUAL'
+    """
     disk_config = 'MANUAL'
 
     @classmethod
@@ -156,7 +167,11 @@
 
 
 class ServersTestBootFromVolume(ServersTestJSON):
-    """Run the `ServersTestJSON` tests with a volume backed VM"""
+    """Test creating server and verifying the server attributes
+
+    This is to create a server booted from a volume with disk_config 'AUTO'
+    """
+    # Run the `ServersTestJSON` tests with a volume backed VM
     volume_backed = True
 
     @classmethod
@@ -165,3 +180,56 @@
         if not utils.get_service_list()['volume']:
             msg = "Volume service not enabled."
             raise cls.skipException(msg)
+
+
+class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
+    """Test creating server with FQDN hostname and verifying atrributes
+
+    Starting Wallaby release, Nova sanitizes freeform characters in
+    server hostname with dashes. This test verifies the same.
+    """
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(ServersTestFqdnHostnames, cls).setup_credentials()
+
+    @classmethod
+    def setup_clients(cls):
+        super(ServersTestFqdnHostnames, cls).setup_clients()
+        cls.client = cls.servers_client
+
+    @decorators.idempotent_id('622066d2-39fc-4c09-9eeb-35903c114a0a')
+    @testtools.skipUnless(
+        CONF.compute_feature_enabled.hostname_fqdn_sanitization,
+        'FQDN hostname sanitization is not supported.')
+    @testtools.skipUnless(CONF.validation.run_validation,
+                          'Instance validation tests are disabled.')
+    def test_create_server_with_fqdn_name(self):
+        """Test to create an instance with FQDN type name scheme"""
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        self.server_name = 'guest-instance-1.domain.com'
+        self.password = data_utils.rand_password()
+        self.accessIPv4 = '2.2.2.2'
+        test_server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='ACTIVE',
+            adminPass=self.password,
+            name=self.server_name,
+            accessIPv4=self.accessIPv4)
+
+        """Verify the hostname within the instance is sanitized
+
+        Freeform characters in the hostname are replaced with dashes
+        """
+        linux_client = remote_client.RemoteClient(
+            self.get_server_ip(test_server, validation_resources),
+            self.ssh_user,
+            self.password,
+            validation_resources['keypair']['private_key'],
+            server=test_server,
+            servers_client=self.client)
+        hostname = linux_client.exec_command("hostname").rstrip()
+        self.assertEqual('guest-instance-1-domain-com', hostname)
diff --git a/tempest/api/compute/servers/test_create_server_multi_nic.py b/tempest/api/compute/servers/test_create_server_multi_nic.py
index d0f53fe..bd3f58d 100644
--- a/tempest/api/compute/servers/test_create_server_multi_nic.py
+++ b/tempest/api/compute/servers/test_create_server_multi_nic.py
@@ -24,6 +24,7 @@
 
 
 class ServersTestMultiNic(base.BaseV2ComputeTest):
+    """Test multiple networks in servers"""
 
     @classmethod
     def skip_checks(cls):
@@ -59,8 +60,11 @@
 
     @decorators.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
     def test_verify_multiple_nics_order(self):
-        # Verify that the networks order given at the server creation is
-        # preserved within the server.
+        """Test verifying multiple networks order in server
+
+        The order of networks given at server creation is preserved within
+        the server.
+        """
         net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
         net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
 
@@ -95,6 +99,12 @@
 
     @decorators.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
     def test_verify_duplicate_network_nics(self):
+        """Test multiple duplicate networks can be used to create server
+
+        When creating a server with networks [net1, net2, net1], the server
+        should be created successfully and all three networks should appear
+        in the server addresses.
+        """
         # Verify that server creation does not fail when more than one nic
         # is created on the same network.
         net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index a7db88a..ee25a22 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -26,6 +26,7 @@
 
 
 class DeleteServersTestJSON(base.BaseV2ComputeTest):
+    """Test deleting servers in various states"""
     create_default_network = True
 
     # NOTE: Server creations of each test class should be under 10
@@ -38,21 +39,21 @@
 
     @decorators.idempotent_id('9e6e0c87-3352-42f7-9faf-5d6210dbd159')
     def test_delete_server_while_in_building_state(self):
-        # Delete a server while it's VM state is Building
+        """Test deleting a server while it's VM state is Building"""
         server = self.create_test_server(wait_until='BUILD')
         self.client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.client, server['id'])
 
     @decorators.idempotent_id('925fdfb4-5b13-47ea-ac8a-c36ae6fddb05')
     def test_delete_active_server(self):
-        # Delete a server while it's VM state is Active
+        """Test deleting a server while it's VM state is Active"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.client, server['id'])
 
     @decorators.idempotent_id('546d368c-bb6c-4645-979a-83ed16f3a6be')
     def test_delete_server_while_in_shutoff_state(self):
-        # Delete a server while it's VM state is Shutoff
+        """Test deleting a server while it's VM state is Shutoff"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.stop_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'SHUTOFF')
@@ -63,7 +64,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_delete_server_while_in_pause_state(self):
-        # Delete a server while it's VM state is Pause
+        """Test deleting a server while it's VM state is Pause"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.pause_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
@@ -74,7 +75,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                           'Suspend is not available.')
     def test_delete_server_while_in_suspended_state(self):
-        # Delete a server while it's VM state is Suspended
+        """Test deleting a server while it's VM state is Suspended"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.suspend_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'SUSPENDED')
@@ -85,7 +86,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.shelve,
                           'Shelve is not available.')
     def test_delete_server_while_in_shelved_state(self):
-        # Delete a server while it's VM state is Shelved
+        """Test deleting a server while it's VM state is Shelved"""
         server = self.create_test_server(wait_until='ACTIVE')
         compute.shelve_server(self.client, server['id'])
 
@@ -96,7 +97,7 @@
     @testtools.skipIf(not CONF.compute_feature_enabled.resize,
                       'Resize not available.')
     def test_delete_server_while_in_verify_resize_state(self):
-        # Delete a server while it's VM state is VERIFY_RESIZE
+        """Test deleting a server while it's VM state is VERIFY_RESIZE"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.resize_server(server['id'], self.flavor_ref_alt)
         waiters.wait_for_server_status(self.client, server['id'],
@@ -107,7 +108,7 @@
     @decorators.idempotent_id('d0f3f0d6-d9b6-4a32-8da4-23015dcab23c')
     @utils.services('volume')
     def test_delete_server_while_in_attached_volume(self):
-        # Delete a server while a volume is attached to it
+        """Test deleting a server while a volume is attached to it"""
         server = self.create_test_server(wait_until='ACTIVE')
 
         volume = self.create_volume()
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 1f7eb7b..56456f4 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -12,6 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from json import decoder as json_decoder
+
 from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 
@@ -33,6 +35,8 @@
 
 class DeviceTaggingBase(base.BaseV2ComputeTest):
 
+    credentials = ['primary', 'admin']
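+    # The admin credential supplies the servers client used by the
+    # interface-detach waiter in TaggedAttachmentsTest below.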
+
     @classmethod
     def skip_checks(cls):
         super(DeviceTaggingBase, cls).skip_checks()
@@ -52,6 +56,7 @@
         cls.ports_client = cls.os_primary.ports_client
         cls.subnets_client = cls.os_primary.subnets_client
         cls.interfaces_client = cls.os_primary.interfaces_client
+        cls.servers_admin_client = cls.os_admin.servers_client
 
     @classmethod
     def setup_credentials(cls):
@@ -101,6 +106,7 @@
 
 
 class TaggedBootDevicesTest(DeviceTaggingBase):
+    """Test tagged boot device with compute microversion equals 2.32"""
 
     min_microversion = '2.32'
     # NOTE(mriedem): max_version looks odd but it's actually correct. Due to a
@@ -110,7 +116,11 @@
     max_microversion = '2.32'
 
     def verify_device_metadata(self, md_json):
-        md_dict = json.loads(md_json)
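+        # Tolerate empty or malformed metadata: report a failed check
+        # (False) instead of raising so the verification can be retried.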
+        try:
+            md_dict = json.loads(md_json)
+        except (json_decoder.JSONDecodeError, TypeError):
+            return False
+
         for d in md_dict['devices']:
             if d['type'] == 'nic':
                 if d['mac'] == self.port1['mac_address']:
@@ -143,6 +153,16 @@
     @decorators.idempotent_id('a2e65a6c-66f1-4442-aaa8-498c31778d96')
     @utils.services('network', 'volume', 'image')
     def test_tagged_boot_devices(self):
+        """Test tagged boot devices
+
+        1. Create volumes
+        2. Create networks
+        3. Create subnets
+        4. Create ports
+        5. Create server, specifying tags for items in networks and
+           block_device_mapping_v2.
+        6. Verify tagged devices are in server via metadata service
+        """
         # Create volumes
         # The create_volume methods waits for the volumes to be available and
         # the base class will clean them up on tearDown.
@@ -294,11 +314,14 @@
 
 
 class TaggedBootDevicesTest_v242(TaggedBootDevicesTest):
+    """Test tagged boot devices with compute microversion greater than 2.41"""
+
     min_microversion = '2.42'
     max_microversion = 'latest'
 
 
 class TaggedAttachmentsTest(DeviceTaggingBase):
+    """Test tagged attachments with compute microversion greater than 2.48"""
 
     min_microversion = '2.49'
     max_microversion = 'latest'
@@ -310,11 +333,17 @@
             raise cls.skipException('Metadata API must be enabled')
 
     def verify_device_metadata(self, md_json):
-        md_dict = json.loads(md_json)
+        try:
+            md_dict = json.loads(md_json)
+        except (json_decoder.JSONDecodeError, TypeError):
+            return False
+
         found_devices = [d['tags'][0] for d in md_dict['devices']
                          if d.get('tags')]
         try:
-            self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
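+            # assertItemsEqual is Python 2 only; comparing sorted copies
+            # gives the same order-insensitive check on Python 3.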
+            self.assertEqual(
+                sorted(found_devices),
+                sorted(['nic-tag', 'volume-tag']))
             return True
         except Exception:
             return False
@@ -332,6 +361,16 @@
     @decorators.idempotent_id('3e41c782-2a89-4922-a9d2-9a188c4e7c7c')
     @utils.services('network', 'volume', 'image')
     def test_tagged_attachment(self):
+        """Test tagged attachment
+
+        1. Create network
+        2. Create subnet
+        3. Create volume
+        4. Create server
+        5. Attach tagged networks and volume
+        6. Verify tagged devices are in server via metadata service
+        7. Detach tagged networks and volume
+        """
         # Create network
         net = self.networks_client.create_network(
             name=data_utils.rand_name(
@@ -386,11 +425,13 @@
         self.servers_client.detach_volume(server['id'], volume['id'])
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'available')
-        self.interfaces_client.delete_interface(server['id'],
-                                                interface['port_id'])
-        waiters.wait_for_interface_detach(self.interfaces_client,
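+        # Capture the request id of the detach call so the waiter can track
+        # this particular detach; the admin servers client set up in
+        # DeviceTaggingBase is used for that.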
+        req_id = self.interfaces_client.delete_interface(
+            server['id'], interface['port_id']
+        ).response['x-openstack-request-id']
+        waiters.wait_for_interface_detach(self.servers_admin_client,
                                           server['id'],
-                                          interface['port_id'])
+                                          interface['port_id'],
+                                          req_id)
         # FIXME(mriedem): The assertion that the tagged devices are removed
         # from the metadata for the server is being skipped until bug 1775947
         # is fixed.
diff --git a/tempest/api/compute/servers/test_disk_config.py b/tempest/api/compute/servers/test_disk_config.py
index 5b8e7ab..e5e051a 100644
--- a/tempest/api/compute/servers/test_disk_config.py
+++ b/tempest/api/compute/servers/test_disk_config.py
@@ -24,6 +24,8 @@
 
 
 class ServerDiskConfigTestJSON(base.BaseV2ComputeTest):
+    """Test disk config option of server"""
+
     create_default_network = True
 
     @classmethod
@@ -49,7 +51,7 @@
 
     @decorators.idempotent_id('bef56b09-2e8c-4883-a370-4950812f430e')
     def test_rebuild_server_with_manual_disk_config(self):
-        # A server should be rebuilt using the manual disk config option
+        """A server should be rebuilt using the manual disk config option"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.client.delete_server, server['id'])
         self._update_server_with_disk_config(server['id'],
@@ -68,7 +70,7 @@
 
     @decorators.idempotent_id('9c9fae77-4feb-402f-8450-bf1c8b609713')
     def test_rebuild_server_with_auto_disk_config(self):
-        # A server should be rebuilt using the auto disk config option
+        """A server should be rebuilt using the auto disk config option"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.client.delete_server, server['id'])
         self._update_server_with_disk_config(server['id'],
@@ -89,7 +91,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_from_manual_to_auto(self):
-        # A server should be resized from manual to auto disk config
+        """A server should be resized from manual to auto disk config"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.client.delete_server, server['id'])
         self._update_server_with_disk_config(server['id'],
@@ -105,7 +107,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_from_auto_to_manual(self):
-        # A server should be resized from auto to manual disk config
+        """A server should be resized from auto to manual disk config"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.client.delete_server, server['id'])
         self._update_server_with_disk_config(server['id'],
@@ -119,7 +121,7 @@
 
     @decorators.idempotent_id('5ef18867-358d-4de9-b3c9-94d4ba35742f')
     def test_update_server_from_auto_to_manual(self):
-        # A server should be updated from auto to manual disk config
+        """A server should be updated from auto to manual disk config"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.client.delete_server, server['id'])
         self._update_server_with_disk_config(server['id'],
diff --git a/tempest/api/compute/servers/test_instance_actions.py b/tempest/api/compute/servers/test_instance_actions.py
index 00837eb..028da68 100644
--- a/tempest/api/compute/servers/test_instance_actions.py
+++ b/tempest/api/compute/servers/test_instance_actions.py
@@ -19,6 +19,8 @@
 
 
 class InstanceActionsTestJSON(base.BaseV2ComputeTest):
+    """Test instance actions API"""
+
     create_default_network = True
 
     @classmethod
@@ -34,10 +36,8 @@
 
     @decorators.idempotent_id('77ca5cc5-9990-45e0-ab98-1de8fead201a')
     def test_list_instance_actions(self):
-        # List actions of the provided server
-        self.client.reboot_server(self.server['id'], type='HARD')
-        waiters.wait_for_server_status(self.client,
-                                       self.server['id'], 'ACTIVE')
+        """Test listing actions of the provided server"""
+        self.reboot_server(self.server['id'], type='HARD')
 
         body = (self.client.list_instance_actions(self.server['id'])
                 ['instanceActions'])
@@ -47,7 +47,7 @@
 
     @decorators.idempotent_id('aacc71ca-1d70-4aa5-bbf6-0ff71470e43c')
     def test_get_instance_action(self):
-        # Get the action details of the provided server
+        """Test getting the action details of the provided server"""
         body = self.client.show_instance_action(
             self.server['id'], self.request_id)['instanceAction']
         self.assertEqual(self.server['id'], body['instance_uuid'])
@@ -55,6 +55,8 @@
 
 
 class InstanceActionsV221TestJSON(base.BaseV2ComputeTest):
+    """Test instance actions with compute microversion greater than 2.20"""
+
     create_default_network = True
 
     min_microversion = '2.21'
@@ -67,8 +69,11 @@
 
     @decorators.idempotent_id('0a0f85d4-10fa-41f6-bf80-a54fb4aa2ae1')
     def test_get_list_deleted_instance_actions(self):
+        """Test listing actions for deleted instance
 
-        # List actions of the deleted server
+        Listing actions for a deleted instance should succeed and the
+        returned actions should contain 'create' and 'delete'.
+        """
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.client, server['id'])
diff --git a/tempest/api/compute/servers/test_instance_actions_negative.py b/tempest/api/compute/servers/test_instance_actions_negative.py
index 4b5a2c3..dd2bf06 100644
--- a/tempest/api/compute/servers/test_instance_actions_negative.py
+++ b/tempest/api/compute/servers/test_instance_actions_negative.py
@@ -20,6 +20,8 @@
 
 
 class InstanceActionsNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of instance actions"""
+
     create_default_network = True
 
     @classmethod
@@ -35,7 +37,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('67e1fce6-7ec2-45c6-92d4-0a8f1a632910')
     def test_list_instance_actions_non_existent_server(self):
-        # List actions of the non-existent server id
+        """Test listing actions for non existent instance should fail"""
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.list_instance_actions,
@@ -44,6 +46,6 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0269f40a-6f18-456c-b336-c03623c897f1')
     def test_get_instance_action_invalid_request(self):
-        # Get the action details of the provided server with invalid request
+        """Test getting instance action with invalid request_id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.show_instance_action,
                           self.server['id'], '999')
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 3dffd01..990dd52 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -26,6 +26,7 @@
 
 
 class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
+    """Test listing servers filtered by specified attribute"""
 
     @classmethod
     def setup_credentials(cls):
@@ -71,7 +72,7 @@
     @testtools.skipUnless(CONF.compute.image_ref != CONF.compute.image_ref_alt,
                           "Need distinct images to run this test")
     def test_list_servers_filter_by_image(self):
-        # Filter the list of servers by image
+        """Filter the list of servers by image"""
         params = {'image': self.image_ref}
         body = self.client.list_servers(**params)
         servers = body['servers']
@@ -82,7 +83,7 @@
 
     @decorators.idempotent_id('573637f5-7325-47bb-9144-3476d0416908')
     def test_list_servers_filter_by_flavor(self):
-        # Filter the list of servers by flavor
+        """Filter the list of servers by flavor"""
         params = {'flavor': self.flavor_ref_alt}
         body = self.client.list_servers(**params)
         servers = body['servers']
@@ -93,7 +94,7 @@
 
     @decorators.idempotent_id('9b067a7b-7fee-4f6a-b29c-be43fe18fc5a')
     def test_list_servers_filter_by_server_name(self):
-        # Filter the list of servers by server name
+        """Filter the list of servers by server name"""
         params = {'name': self.s1_name}
         body = self.client.list_servers(**params)
         servers = body['servers']
@@ -104,7 +105,7 @@
 
     @decorators.idempotent_id('ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e')
     def test_list_servers_filter_by_active_status(self):
-        # Filter the list of servers by server active status
+        """Filter the list of servers by server active status"""
         params = {'status': 'active'}
         body = self.client.list_servers(**params)
         servers = body['servers']
@@ -115,7 +116,7 @@
 
     @decorators.idempotent_id('451dbbb2-f330-4a9f-b0e1-5f5d2cb0f34c')
     def test_list_servers_filter_by_shutoff_status(self):
-        # Filter the list of servers by server shutoff status
+        """Filter the list of servers by server shutoff status"""
         params = {'status': 'shutoff'}
         self.client.stop_server(self.s1['id'])
         waiters.wait_for_server_status(self.client, self.s1['id'],
@@ -132,21 +133,30 @@
 
     @decorators.idempotent_id('614cdfc1-d557-4bac-915b-3e67b48eee76')
     def test_list_servers_filter_by_limit(self):
-        # Verify only the expected number of servers are returned
+        """Filter the list of servers by limit 1
+
+        Verify only the expected number of servers are returned (one server)
+        """
         params = {'limit': 1}
         servers = self.client.list_servers(**params)
         self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
 
     @decorators.idempotent_id('b1495414-2d93-414c-8019-849afe8d319e')
     def test_list_servers_filter_by_zero_limit(self):
-        # Verify only the expected number of servers are returned
+        """Filter the list of servers by limit 0
+
+        Verify only the expected number of servers are returned (no server)
+        """
         params = {'limit': 0}
         servers = self.client.list_servers(**params)
         self.assertEmpty(servers['servers'])
 
     @decorators.idempotent_id('37791bbd-90c0-4de0-831e-5f38cba9c6b3')
     def test_list_servers_filter_by_exceed_limit(self):
-        # Verify only the expected number of servers are returned
+        """Filter the list of servers by exceeded limit
+
+        Verify only the expected number of servers are returned (all servers)
+        """
         params = {'limit': 100000}
         servers = self.client.list_servers(**params)
         all_servers = self.client.list_servers()
@@ -157,7 +167,7 @@
     @testtools.skipUnless(CONF.compute.image_ref != CONF.compute.image_ref_alt,
                           "Need distinct images to run this test")
     def test_list_servers_detailed_filter_by_image(self):
-        # Filter the detailed list of servers by image
+        """"Filter the detailed list of servers by image"""
         params = {'image': self.image_ref}
         body = self.client.list_servers(detail=True, **params)
         servers = body['servers']
@@ -168,7 +178,7 @@
 
     @decorators.idempotent_id('80c574cc-0925-44ba-8602-299028357dd9')
     def test_list_servers_detailed_filter_by_flavor(self):
-        # Filter the detailed list of servers by flavor
+        """Filter the detailed list of servers by flavor"""
         params = {'flavor': self.flavor_ref_alt}
         body = self.client.list_servers(detail=True, **params)
         servers = body['servers']
@@ -179,7 +189,7 @@
 
     @decorators.idempotent_id('f9eb2b70-735f-416c-b260-9914ac6181e4')
     def test_list_servers_detailed_filter_by_server_name(self):
-        # Filter the detailed list of servers by server name
+        """Filter the detailed list of servers by server name"""
         params = {'name': self.s1_name}
         body = self.client.list_servers(detail=True, **params)
         servers = body['servers']
@@ -190,7 +200,7 @@
 
     @decorators.idempotent_id('de2612ab-b7dd-4044-b0b1-d2539601911f')
     def test_list_servers_detailed_filter_by_server_status(self):
-        # Filter the detailed list of servers by server status
+        """Filter the detailed list of servers by server status"""
         params = {'status': 'active'}
         body = self.client.list_servers(detail=True, **params)
         servers = body['servers']
@@ -204,6 +214,7 @@
 
     @decorators.idempotent_id('e9f624ee-92af-4562-8bec-437945a18dcb')
     def test_list_servers_filtered_by_name_wildcard(self):
+        """Filter the list of servers by part of server name"""
         # List all servers that contains '-instance' in name
         params = {'name': '-instance'}
         body = self.client.list_servers(**params)
@@ -226,6 +237,7 @@
 
     @decorators.idempotent_id('24a89b0c-0d55-4a28-847f-45075f19b27b')
     def test_list_servers_filtered_by_name_regex(self):
+        """Filter the list of servers by server name regular expression"""
         # list of regex that should match s1, s2 and s3
         regexes = [r'^.*\-instance\-[0-9]+$', r'^.*\-instance\-.*$']
         for regex in regexes:
@@ -250,7 +262,7 @@
 
     @decorators.idempotent_id('43a1242e-7b31-48d1-88f2-3f72aa9f2077')
     def test_list_servers_filtered_by_ip(self):
-        # Filter servers by ip
+        """Filter the list of servers by server ip address"""
         # Here should be listed 1 server
         if not self.fixed_network_name:
             msg = 'fixed_network_name needs to be configured to run this test'
@@ -284,22 +296,32 @@
         for ip in ip_list:
             self.assertNotIn(ip_list[ip], map(lambda x: x['id'], servers))
 
-    @decorators.skip_because(bug="1540645")
     @decorators.idempotent_id('a905e287-c35e-42f2-b132-d02b09f3654a')
     def test_list_servers_filtered_by_ip_regex(self):
-        # Filter servers by regex ip
-        # List all servers filtered by part of ip address.
-        # Here should be listed all servers
+        """Filter the list of servers by part of server ip address"""
         if not self.fixed_network_name:
             msg = 'fixed_network_name needs to be configured to run this test'
             raise self.skipException(msg)
-        self.s1 = self.client.show_server(self.s1['id'])['server']
-        addr_spec = self.s1['addresses'][self.fixed_network_name][0]
-        ip = addr_spec['addr'][0:-3]
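+        # Derive the longest common prefix of the three servers' addresses
+        # instead of assuming how many trailing characters to strip from a
+        # single address.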
+        # query addresses of the 3 servers
+        addrs = []
+        for s in [self.s1, self.s2, self.s3]:
+            s_show = self.client.show_server(s['id'])['server']
+            addr_spec = s_show['addresses'][self.fixed_network_name][0]
+            addrs.append(addr_spec['addr'])
+        # find common part of the 3 ip addresses
+        prefix = ''
+        addrs_len = [len(a) for a in addrs]
+        addrs_len.sort()
+        # iterate over the smallest length of an ip
+        for i in range(addrs_len[0]):
+            if not addrs[0][i] == addrs[1][i] == addrs[2][i]:
+                break
+            prefix += addrs[0][i]
+
         if addr_spec['version'] == 4:
-            params = {'ip': ip}
+            params = {'ip': prefix}
         else:
-            params = {'ip6': ip}
+            params = {'ip6': prefix}
         # capture all servers in case something goes wrong
         all_servers = self.client.list_servers(detail=True)
         body = self.client.list_servers(**params)
@@ -317,7 +339,10 @@
 
     @decorators.idempotent_id('67aec2d0-35fe-4503-9f92-f13272b867ed')
     def test_list_servers_detailed_limit_results(self):
-        # Verify only the expected number of detailed results are returned
+        """Filter the detailed list of servers by limit 1
+
+        Verify only the expected number of servers are returned (one server)
+        """
         params = {'limit': 1}
         servers = self.client.list_servers(detail=True, **params)
         self.assertEqual(1, len(servers['servers']))
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index b95db5c..3d55696 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -20,6 +20,8 @@
 
 
 class ListServersNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of listing servers"""
+
     create_default_network = True
 
     @classmethod
@@ -45,7 +47,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f')
     def test_list_servers_with_a_deleted_server(self):
-        # Verify deleted servers do not show by default in list servers
+        """Test that deleted servers do not show by default in list servers"""
         # List servers and verify server not returned
         body = self.client.list_servers()
         servers = body['servers']
@@ -56,7 +58,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ff01387d-c7ad-47b4-ae9e-64fa214638fe')
     def test_list_servers_by_non_existing_image(self):
-        # Listing servers for a non existing image returns empty list
+        """Test listing servers for a non existing image returns empty list"""
         body = self.client.list_servers(image='non_existing_image')
         servers = body['servers']
         self.assertEmpty(servers)
@@ -64,7 +66,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5913660b-223b-44d4-a651-a0fbfd44ca75')
     def test_list_servers_by_non_existing_flavor(self):
-        # Listing servers by non existing flavor returns empty list
+        """Test listing servers by non existing flavor returns empty list"""
         body = self.client.list_servers(flavor='non_existing_flavor')
         servers = body['servers']
         self.assertEmpty(servers)
@@ -72,7 +74,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e2c77c4a-000a-4af3-a0bd-629a328bde7c')
     def test_list_servers_by_non_existing_server_name(self):
-        # Listing servers for a non existent server name returns empty list
+        """Test listing servers for a non existent server name
+
+        Listing servers for a non existent server name should return an
+        empty list.
+        """
+
         body = self.client.list_servers(name='non_existing_server_name')
         servers = body['servers']
         self.assertEmpty(servers)
@@ -80,9 +87,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fcdf192d-0f74-4d89-911f-1ec002b822c4')
     def test_list_servers_status_non_existing(self):
-        # When invalid status is specified, up to microversion 2.37,
-        # an empty list is returned, and starting from microversion 2.38,
-        # a 400 error is returned in that case.
+        """Test listing servers with non existing status
+
+        When invalid status is specified, up to microversion 2.37,
+        an empty list is returned, and starting from microversion 2.38,
+        a 400 error is returned in that case.
+        """
+
         if self.is_requested_microversion_compatible('2.37'):
             body = self.client.list_servers(status='non_existing_status')
             servers = body['servers']
@@ -94,6 +105,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d47c17fb-eebd-4287-8e95-f20a7e627b18')
     def test_list_servers_by_limits_greater_than_actual_count(self):
+        """Test listing servers by limit greater than actual count
+
+        Listing servers by limit greater than actual count should return
+        all servers.
+        """
+
         # Gather the complete list of servers in the project for reference
         full_list = self.client.list_servers()['servers']
         # List servers by specifying a greater value for limit
@@ -104,21 +121,21 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('679bc053-5e70-4514-9800-3dfab1a380a6')
     def test_list_servers_by_limits_pass_string(self):
-        # Return an error if a string value is passed for limit
+        """Test listing servers by non-integer limit should fail"""
         self.assertRaises(lib_exc.BadRequest, self.client.list_servers,
                           limit='testing')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('62610dd9-4713-4ee0-8beb-fd2c1aa7f950')
     def test_list_servers_by_limits_pass_negative_value(self):
-        # Return an error if a negative value for limit is passed
+        """Test listing servers by negative limit should fail"""
         self.assertRaises(lib_exc.BadRequest, self.client.list_servers,
                           limit=-1)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('87d12517-e20a-4c9c-97b6-dd1628d6d6c9')
     def test_list_servers_by_changes_since_invalid_date(self):
-        # Return an error when invalid date format is passed
+        """Test listing servers by invalid changes-since format should fail"""
         params = {'changes-since': '2011/01/01'}
         self.assertRaises(lib_exc.BadRequest, self.client.list_servers,
                           **params)
@@ -126,7 +143,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('74745ad8-b346-45b5-b9b8-509d7447fc1f')
     def test_list_servers_by_changes_since_future_date(self):
-        # Return an empty list when a date in the future is passed.
+        """Test listing servers by a future changes-since date
+
+        Return an empty list when a date in the future is passed as
+        changes-since value.
+        """
+
         # updated_at field may haven't been set at the point in the boot
         # process where build_request still exists, so add
         # {'status': 'ACTIVE'} along with changes-since as filter.
@@ -138,7 +160,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('93055106-2d34-46fe-af68-d9ddbf7ee570')
     def test_list_servers_detail_server_is_deleted(self):
-        # Server details are not listed for a deleted server
+        """Test listing servers detail should not contain deleted server"""
         body = self.client.list_servers(detail=True)
         servers = body['servers']
         actual = [srv for srv in servers
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index e176251..10c76bb 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -19,10 +19,15 @@
 
 
 class MultipleCreateTestJSON(base.BaseV2ComputeTest):
+    """Test creating multiple servers in one request"""
     create_default_network = True
 
     @decorators.idempotent_id('61e03386-89c3-449c-9bb1-a06f423fd9d1')
     def test_multiple_create(self):
+        """Test creating multiple servers in one request
+
+        When creating a server with min_count=2, 2 servers will be created.
+        """
         tenant_network = self.get_tenant_network()
         body, servers = compute.create_test_server(
             self.os_primary,
@@ -39,6 +44,12 @@
 
     @decorators.idempotent_id('864777fb-2f1e-44e3-b5b9-3eb6fa84f2f7')
     def test_multiple_create_with_reservation_return(self):
+        """Test creating multiple servers with return_reservation_id=True
+
+        When creating multiple servers with return_reservation_id=True,
+        a reservation_id will be returned.
+        """
+
         body = self.create_test_server(wait_until='ACTIVE',
                                        min_count=1,
                                        max_count=2,
diff --git a/tempest/api/compute/servers/test_multiple_create_negative.py b/tempest/api/compute/servers/test_multiple_create_negative.py
index 422510f..3a970dd 100644
--- a/tempest/api/compute/servers/test_multiple_create_negative.py
+++ b/tempest/api/compute/servers/test_multiple_create_negative.py
@@ -19,10 +19,12 @@
 
 
 class MultipleCreateNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of creating multiple servers in one request"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('daf29d8d-e928-4a01-9a8c-b129603f3fc0')
     def test_min_count_less_than_one(self):
+        """Test creating server with min_count=0 should fail"""
         invalid_min_count = 0
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           min_count=invalid_min_count)
@@ -30,6 +32,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('999aa722-d624-4423-b813-0d1ac9884d7a')
     def test_min_count_non_integer(self):
+        """Test creating server with non-integer min_count should fail"""
         invalid_min_count = 2.5
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           min_count=invalid_min_count)
@@ -37,6 +40,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
     def test_max_count_less_than_one(self):
+        """Test creating server with max_count < 1 shoudld fail"""
         invalid_max_count = 0
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           max_count=invalid_max_count)
@@ -44,6 +48,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9c5698d1-d7af-4c80-b971-9d403135eea2')
     def test_max_count_non_integer(self):
+        """Test creating server with non-integer max_count should fail"""
         invalid_max_count = 2.5
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           max_count=invalid_max_count)
@@ -51,6 +56,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('476da616-f1ef-4271-a9b1-b9fc87727cdf')
     def test_max_count_less_than_min_count(self):
+        """Test creating server with max_count < min_count should fail"""
         min_count = 3
         max_count = 2
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 68e09e7..1308b19 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -14,9 +14,7 @@
 #    under the License.
 
 import struct
-
-import six
-import six.moves.urllib.parse as urlparse
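+# urllib.parse from the standard library replaces the six.moves shim.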
+import urllib.parse as urlparse
 import urllib3
 
 from tempest.api.compute import base
@@ -26,13 +24,10 @@
 
 CONF = config.CONF
 
-if six.PY2:
-    ord_func = ord
-else:
-    ord_func = int
-
 
 class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
+    """Test novnc console"""
+
     create_default_network = True
 
     @classmethod
@@ -72,7 +67,7 @@
         resp = urllib3.PoolManager().request('GET', vnc_url)
         # Make sure that the GET request was accepted by the novncproxy
         self.assertEqual(resp.status, 200, 'Got a Bad HTTP Response on the '
-                         'initial call: ' + six.text_type(resp.status))
+                         'initial call: ' + str(resp.status))
         # Do some basic validation to make sure it is an expected HTML document
         resp_data = resp.data.decode()
         # This is needed in the case of example: <html lang="en">
@@ -114,18 +109,18 @@
             # single word(4 bytes).
             self.assertEqual(
                 data_length, 4, 'Expected authentication type None.')
-            self.assertIn(1, [ord_func(data[i]) for i in (0, 3)],
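+            # ord_func was ord() on Python 2 and int() on Python 3; with
+            # Python 2 support gone, a plain int() call is enough.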
+            self.assertIn(1, [int(data[i]) for i in (0, 3)],
                           'Expected authentication type None.')
         else:
             self.assertGreaterEqual(
                 len(data), 2, 'Expected authentication type None.')
             self.assertIn(
                 1,
-                [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
+                [int(data[i + 1]) for i in range(int(data[0]))],
                 'Expected authentication type None.')
             # Send to the server that we only support authentication
             # type None
-            self._websocket.send_frame(six.int2byte(1))
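+            # bytes((1,)) builds the single byte b'\x01', matching what
+            # six.int2byte(1) used to return.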
+            self._websocket.send_frame(bytes((1,)))
 
             # The server should send 4 bytes of 0's if security
             # handshake succeeded
@@ -134,11 +129,11 @@
                 len(data), 4,
                 'Server did not think security was successful.')
             self.assertEqual(
-                [ord_func(i) for i in data], [0, 0, 0, 0],
+                [int(i) for i in data], [0, 0, 0, 0],
                 'Server did not think security was successful.')
 
         # Say to leave the desktop as shared as part of client initialization
-        self._websocket.send_frame(six.int2byte(1))
+        self._websocket.send_frame(bytes((1,)))
         # Get the server initialization packet back and make sure it is the
         # right structure where bytes 20-24 is the name length and
         # 24-N is the name
@@ -168,11 +163,11 @@
             self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
                                                 b'Protocols'),
             'Incorrect HTTP return status code: {}'.format(
-                six.text_type(self._websocket.response)
+                str(self._websocket.response)
             )
         )
         _required_header = 'upgrade: websocket'
-        _response = six.text_type(self._websocket.response).lower()
+        _response = str(self._websocket.response).lower()
         self.assertIn(
             _required_header,
             _response,
@@ -181,6 +176,7 @@
 
     @decorators.idempotent_id('c640fdff-8ab4-45a4-a5d8-7e6146cbd0dc')
     def test_novnc(self):
+        """Test accessing novnc console of server"""
         if self.use_get_remote_console:
             body = self.client.get_remote_console(
                 self.server['id'], console_type='novnc',
@@ -200,6 +196,11 @@
 
     @decorators.idempotent_id('f9c79937-addc-4aaa-9e0e-841eef02aeb7')
     def test_novnc_bad_token(self):
+        """Test accessing novnc console with bad token
+
+        Do the WebSockify HTTP request to the novnc proxy with a bad token;
+        the novnc proxy should reject the connection and close it.
+        """
         if self.use_get_remote_console:
             body = self.client.get_remote_console(
                 self.server['id'], console_type='novnc',
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index d477be0..152e7e8 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urlparse
+
 from oslo_log import log as logging
-from six.moves.urllib import parse as urlparse
 import testtools
 
 from tempest.api.compute import base
@@ -34,6 +35,8 @@
 
 
 class ServerActionsTestJSON(base.BaseV2ComputeTest):
+    """Test server actions"""
+
     def setUp(self):
         # NOTE(afazekas): Normally we use the same server with all test cases,
         # but if it has an issue, we build a new one
@@ -84,6 +87,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.change_password,
                           'Change password not available.')
     def test_change_server_password(self):
+        """Test changing server's password
+
+        The server's password should be set to the provided password and
+        the user can authenticate with the new password.
+        """
         # Since this test messes with the password and makes the
         # server unreachable, it should create its own server
         validation_resources = self.get_test_validation_resources(
@@ -128,8 +136,7 @@
             # in a server
             linux_client.exec_command("sync")
 
-        self.client.reboot_server(self.server_id, type=reboot_type)
-        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+        self.reboot_server(self.server_id, type=reboot_type)
 
         if CONF.validation.run_validation:
             # Log in and verify the boot time has changed
@@ -147,17 +154,15 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32')
     def test_reboot_server_hard(self):
-        # The server should be power cycled
-        self._test_reboot_server('HARD')
+        """Test hard rebooting server
 
-    @decorators.skip_because(bug="1014647")
-    @decorators.idempotent_id('4640e3ef-a5df-482e-95a1-ceeeb0faa84d')
-    def test_reboot_server_soft(self):
-        # The server should be signaled to reboot gracefully
-        self._test_reboot_server('SOFT')
+        The server should be power cycled.
+        """
+        self._test_reboot_server('HARD')
 
     @decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
     def test_remove_server_all_security_groups(self):
+        """Test removing all security groups from server"""
         server = self.create_test_server(wait_until='ACTIVE')
 
         # Remove all Security group
@@ -223,7 +228,7 @@
             # 4.Plain username/password auth, if a password was given.
             linux_client = remote_client.RemoteClient(
                 self.get_server_ip(rebuilt_server, validation_resources),
-                self.ssh_user,
+                self.ssh_alt_user,
                 password,
                 validation_resources['keypair']['private_key'],
                 server=rebuilt_server,
@@ -232,12 +237,19 @@
 
     @decorators.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
     def test_rebuild_server(self):
+        """Test rebuilding server
+
+        The server should be rebuilt using the provided image and data.
+        """
         self._test_rebuild_server()
 
     @decorators.idempotent_id('30449a88-5aff-4f9b-9866-6ee9b17f906d')
     def test_rebuild_server_in_stop_state(self):
-        # The server in stop state  should be rebuilt using the provided
-        # image and remain in SHUTOFF state
+        """Test rebuilding server in stop state
+
+        The server in stop state should be rebuilt using the provided
+        image and remain in SHUTOFF state.
+        """
         server = self.client.show_server(self.server_id)['server']
         old_image = server['image']['id']
         new_image = (self.image_ref_alt
@@ -274,6 +286,10 @@
     @decorators.idempotent_id('b68bd8d6-855d-4212-b59b-2e704044dace')
     @utils.services('volume')
     def test_rebuild_server_with_volume_attached(self):
+        """Test rebuilding server with volume attached
+
+        The volume should be attached to the instance after rebuild.
+        """
         # create a new volume and attach it to the server
         volume = self.create_volume()
 
@@ -294,7 +310,7 @@
                 self.os_primary)
             linux_client = remote_client.RemoteClient(
                 self.get_server_ip(server, validation_resources),
-                self.ssh_user,
+                self.ssh_alt_user,
                 password=None,
                 pkey=validation_resources['keypair']['private_key'],
                 server=server,
@@ -333,6 +349,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_confirm(self):
+        """Test resizing server and then confirming"""
         self._test_resize_server_confirm(self.server_id, stop=False)
 
     @decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
@@ -341,6 +358,7 @@
                           'Resize not available.')
     @utils.services('volume')
     def test_resize_volume_backed_server_confirm(self):
+        """Test resizing a volume backed server and then confirming"""
         # We have to create a new server that is volume-backed since the one
         # from setUp is not volume-backed.
         kwargs = {'volume_backed': True,
@@ -377,14 +395,18 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_confirm_from_stopped(self):
+        """Test resizing a stopped server and then confirming"""
         self._test_resize_server_confirm(self.server_id, stop=True)
 
     @decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_revert(self):
-        # The server's RAM and disk space should return to its original
-        # values after a resize is reverted
+        """Test resizing server and then reverting
+
+        The server's RAM and disk space should return to its original
+        values after a resize is reverted.
+        """
 
         self.client.resize_server(self.server_id, self.flavor_ref_alt)
         # NOTE(zhufl): Explicitly delete the server to get a new one for later
@@ -405,10 +427,13 @@
                           'Resize not available.')
     @utils.services('volume')
     def test_resize_server_revert_with_volume_attached(self):
-        # Tests attaching a volume to a server instance and then resizing
-        # the instance. Once the instance is resized, revert the resize which
-        # should move the instance and volume attachment back to the original
-        # compute host.
+        """Test resizing a volume attached server and then reverting
+
+        Tests attaching a volume to a server instance and then resizing
+        the instance. Once the instance is resized, revert the resize which
+        should move the instance and volume attachment back to the original
+        compute host.
+        """
 
         # Create a blank volume and attach it to the server created in setUp.
         volume = self.create_volume()
@@ -437,7 +462,14 @@
                           'Snapshotting not available, backup not possible.')
     @utils.services('image')
     def test_create_backup(self):
-        # Positive test:create backup successfully and rotate backups correctly
+        """Test creating server backup
+
+        1. Create server backup1 with rotation=2; there is 1 backup.
+        2. Create server backup2 with rotation=2; there are 2 backups.
+        3. Create server backup3; since the rotation is 2, the first one
+           (backup1) is deleted, so there are still 2 backups.
+        """
+
         # create the first and the second backup
 
         # Check if glance v1 is available to determine which client to use. We
@@ -563,22 +595,29 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output(self):
-        # Positive test:Should be able to GET the console output
-        # for a given server_id and number of lines
+        """Test getting console output for a server
+
+        Should be able to GET the console output for a given server_id and
+        number of lines.
+        """
 
         # This reboot is necessary for outputting some console log after
         # creating an instance backup. If an instance backup, the console
         # log file is truncated and we cannot get any console log through
         # "console-log" API.
         # The detail is https://bugs.launchpad.net/nova/+bug/1251920
-        self.client.reboot_server(self.server_id, type='HARD')
-        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+        self.reboot_server(self.server_id, type='HARD')
         self.wait_for(self._get_output)
 
     @decorators.idempotent_id('89104062-69d8-4b19-a71b-f47b7af093d7')
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output_with_unlimited_size(self):
+        """Test getting server's console output with unlimited size
+
+        The number of lines in the console output should be bigger than
+        the one retrieved in test_get_console_output.
+        """
         server = self.create_test_server(wait_until='ACTIVE')
 
         def _check_full_length_console_log():
@@ -597,8 +636,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output_server_id_in_shutoff_status(self):
-        # Positive test:Should be able to GET the console output
-        # for a given server_id in SHUTOFF status
+        """Test getting console output for a server in SHUTOFF status
+
+        Should be able to GET the console output for a given server_id
+        in SHUTOFF status.
+        """
 
         # NOTE: SHUTOFF is irregular status. To avoid test instability,
         #       one server is created only for this test without using
@@ -614,6 +656,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_pause_unpause_server(self):
+        """Test pausing and unpausing server"""
         self.client.pause_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
         self.client.unpause_server(self.server_id)
@@ -623,6 +666,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                           'Suspend is not available.')
     def test_suspend_resume_server(self):
+        """Test suspending and resuming server"""
         self.client.suspend_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'SUSPENDED')
@@ -634,6 +678,7 @@
                           'Shelve is not available.')
     @utils.services('image')
     def test_shelve_unshelve_server(self):
+        """Test shelving and unshelving server"""
         if CONF.image_feature_enabled.api_v2:
             glance_client = self.os_primary.image_client_v2
         elif CONF.image_feature_enabled.api_v1:
@@ -673,6 +718,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_shelve_paused_server(self):
+        """Test shelving a paused server"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.pause_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
@@ -682,6 +728,7 @@
 
     @decorators.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
     def test_stop_start_server(self):
+        """Test stopping and starting server"""
         self.client.stop_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
         self.client.start_server(self.server_id)
@@ -689,6 +736,12 @@
 
     @decorators.idempotent_id('80a8094c-211e-440a-ab88-9e59d556c7ee')
     def test_lock_unlock_server(self):
+        """Test locking and unlocking server
+
+        Lock the server, then verify that trying to stop it fails, because
+        a locked server is not allowed to be stopped by a non-admin user.
+        Then unlock the server; now the server can be stopped and started.
+        """
         # Lock the server,try server stop(exceptions throw),unlock it and retry
         self.client.lock_server(self.server_id)
         self.addCleanup(self.client.unlock_server, self.server_id)
@@ -714,6 +767,10 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
                           'VNC Console feature is disabled.')
     def test_get_vnc_console(self):
+        """Test getting vnc console from a server
+
+        The returned vnc console url should be in valid format.
+        """
         if self.is_requested_microversion_compatible('2.5'):
             body = self.client.get_vnc_console(
                 self.server_id, type='novnc')['console']
diff --git a/tempest/api/compute/servers/test_server_addresses.py b/tempest/api/compute/servers/test_server_addresses.py
index c936ce5..5a3f5d0 100644
--- a/tempest/api/compute/servers/test_server_addresses.py
+++ b/tempest/api/compute/servers/test_server_addresses.py
@@ -19,6 +19,7 @@
 
 
 class ServerAddressesTestJSON(base.BaseV2ComputeTest):
+    """Test server addresses"""
     create_default_network = True
 
     @classmethod
@@ -36,8 +37,10 @@
     @decorators.idempotent_id('6eb718c0-02d9-4d5e-acd1-4e0c269cef39')
     @utils.services('network')
     def test_list_server_addresses(self):
-        # All public and private addresses for
-        # a server should be returned
+        """Test listing server address
+
+        All public and private addresses for a server should be returned.
+        """
 
         addresses = self.client.list_addresses(self.server['id'])['addresses']
 
@@ -51,8 +54,11 @@
     @decorators.idempotent_id('87bbc374-5538-4f64-b673-2b0e4443cc30')
     @utils.services('network')
     def test_list_server_addresses_by_network(self):
-        # Providing a network type should filter
-        # the addresses return by that type
+        """Test listing server addresses filtered by network addresses
+
+        Providing a network address should filter the returned addresses
+        to the ones on the specified network.
+        """
 
         addresses = self.client.list_addresses(self.server['id'])['addresses']
 
diff --git a/tempest/api/compute/servers/test_server_addresses_negative.py b/tempest/api/compute/servers/test_server_addresses_negative.py
index f33c6d9..e7444d2 100644
--- a/tempest/api/compute/servers/test_server_addresses_negative.py
+++ b/tempest/api/compute/servers/test_server_addresses_negative.py
@@ -20,6 +20,7 @@
 
 
 class ServerAddressesNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of listing server addresses"""
     create_default_network = True
 
     @classmethod
@@ -36,7 +37,7 @@
     @decorators.idempotent_id('02c3f645-2d2e-4417-8525-68c0407d001b')
     @utils.services('network')
     def test_list_server_addresses_invalid_server_id(self):
-        # List addresses request should fail if server id not in system
+        """List addresses request should fail if server id not in system"""
         self.assertRaises(lib_exc.NotFound, self.client.list_addresses,
                           '999')
 
@@ -44,7 +45,7 @@
     @decorators.idempotent_id('a2ab5144-78c0-4942-a0ed-cc8edccfd9ba')
     @utils.services('network')
     def test_list_server_addresses_by_network_neg(self):
-        # List addresses by network should fail if network name not valid
+        """List addresses by network should fail if network name not valid"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.list_addresses_by_network,
                           self.server['id'], 'invalid')
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
index 4b5efaa..4c0d021 100644
--- a/tempest/api/compute/servers/test_server_group.py
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -82,18 +82,18 @@
 
     @decorators.idempotent_id('5dc57eda-35b7-4af7-9e5f-3c2be3d2d68b')
     def test_create_delete_server_group_with_affinity_policy(self):
-        # Create and Delete the server-group with affinity policy
+        """Test Create/Delete the server-group with affinity policy"""
         self._create_delete_server_group(self.policy)
 
     @decorators.idempotent_id('3645a102-372f-4140-afad-13698d850d23')
     def test_create_delete_server_group_with_anti_affinity_policy(self):
-        # Create and Delete the server-group with anti-affinity policy
+        """Test Create/Delete the server-group with anti-affinity policy"""
         policy = ['anti-affinity']
         self._create_delete_server_group(policy)
 
     @decorators.idempotent_id('154dc5a4-a2fe-44b5-b99e-f15806a4a113')
     def test_create_delete_multiple_server_groups_with_same_name_policy(self):
-        # Create and Delete the server-groups with same name and same policy
+        """Test Create/Delete the server-groups with same name and policy"""
         server_groups = []
         server_group_name = data_utils.rand_name('server-group')
         for _ in range(0, 2):
@@ -108,14 +108,14 @@
 
     @decorators.idempotent_id('b3545034-dd78-48f0-bdc2-a4adfa6d0ead')
     def test_show_server_group(self):
-        # Get the server-group
+        """Test getting the server-group detail"""
         body = self.client.show_server_group(
             self.created_server_group['id'])['server_group']
         self.assertEqual(self.created_server_group, body)
 
     @decorators.idempotent_id('d4874179-27b4-4d7d-80e4-6c560cdfe321')
     def test_list_server_groups(self):
-        # List the server-group
+        """Test listing the server-groups"""
         body = self.client.list_server_groups()['server_groups']
         self.assertIn(self.created_server_group, body)
 
@@ -124,7 +124,7 @@
         compute.is_scheduler_filter_enabled("ServerGroupAffinityFilter"),
         'ServerGroupAffinityFilter is not available.')
     def test_create_server_with_scheduler_hint_group(self):
-        # Create a server with the scheduler hint "group".
+        """Test creating a server with the scheduler hint 'group'"""
         hints = {'group': self.created_server_group['id']}
         server = self.create_test_server(scheduler_hints=hints,
                                          wait_until='ACTIVE')
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 9d87e1c..9f93e76 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -14,13 +14,26 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib import decorators
 
+CONF = config.CONF
 
+
+# TODO(stephenfin): Remove these tests once the nova Ussuri branch goes EOL
 class ServerMetadataTestJSON(base.BaseV2ComputeTest):
+    """Test server metadata"""
+
     create_default_network = True
 
     @classmethod
+    def skip_checks(cls):
+        super(ServerMetadataTestJSON, cls).skip_checks()
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise cls.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
+    @classmethod
     def setup_clients(cls):
         super(ServerMetadataTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
@@ -37,7 +50,10 @@
 
     @decorators.idempotent_id('479da087-92b3-4dcf-aeb3-fd293b2d14ce')
     def test_list_server_metadata(self):
-        # All metadata key/value pairs for a server should be returned
+        """Test listing server metadata
+
+        All metadata key/value pairs for a server should be returned.
+        """
         resp_metadata = (self.client.list_server_metadata(self.server['id'])
                          ['metadata'])
 
@@ -47,7 +63,10 @@
 
     @decorators.idempotent_id('211021f6-21de-4657-a68f-908878cfe251')
     def test_set_server_metadata(self):
-        # The server's metadata should be replaced with the provided values
+        """Test setting server metadata
+
+        The server's metadata should be replaced with the provided values
+        """
         # Create a new set of metadata for the server
         req_metadata = {'meta2': 'data2', 'meta3': 'data3'}
         self.client.set_server_metadata(self.server['id'], req_metadata)
@@ -60,8 +79,10 @@
 
     @decorators.idempotent_id('344d981e-0c33-4997-8a5d-6c1d803e4134')
     def test_update_server_metadata(self):
-        # The server's metadata values should be updated to the
-        # provided values
+        """Test updating server metadata
+
+        The server's metadata values should be updated to the provided values.
+        """
         meta = {'key1': 'alt1', 'key3': 'value3'}
         self.client.update_server_metadata(self.server['id'], meta)
 
@@ -73,8 +94,11 @@
 
     @decorators.idempotent_id('0f58d402-e34a-481d-8af8-b392b17426d9')
     def test_update_metadata_empty_body(self):
-        # The original metadata should not be lost if empty metadata body is
-        # passed
+        """Test updating server metadata to empty values
+
+        The original server metadata should not be lost if empty metadata
+        body is passed.
+        """
         meta = {}
         self.client.update_server_metadata(self.server['id'], meta)
         resp_metadata = (self.client.list_server_metadata(self.server['id'])
@@ -84,15 +108,19 @@
 
     @decorators.idempotent_id('3043c57d-7e0e-49a6-9a96-ad569c265e6a')
     def test_get_server_metadata_item(self):
-        # The value for a specific metadata key should be returned
+        """Test getting specific server metadata item"""
         meta = self.client.show_server_metadata_item(self.server['id'],
                                                      'key2')['meta']
         self.assertEqual('value2', meta['key2'])
 
     @decorators.idempotent_id('58c02d4f-5c67-40be-8744-d3fa5982eb1c')
     def test_set_server_metadata_item(self):
-        # The item's value should be updated to the provided value
-        # Update the metadata value
+        """Test updating specific server metadata item
+
+        The metadata item's value should be updated to the provided value.
+        """
+
+        # Update the metadata value.
         meta = {'nova': 'alt'}
         self.client.set_server_metadata_item(self.server['id'], 'nova', meta)
 
@@ -104,7 +132,10 @@
 
     @decorators.idempotent_id('127642d6-4c7b-4486-b7cd-07265a378658')
     def test_delete_server_metadata_item(self):
-        # The metadata value/key pair should be deleted from the server
+        """Test deleting server metadata item
+
+        The metadata value/key pair should be deleted from the server.
+        """
         self.client.delete_server_metadata_item(self.server['id'], 'key1')
 
         # Verify the metadata item has been removed
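
Note on the hunk above: the new skip_checks() makes ServerMetadataTestJSON config-driven as a whole, skipping every test when CONF.compute_feature_enabled.xenapi_apis is disabled. A minimal, self-contained sketch of that class-level guard (the class name, test method and UUID below are placeholders, not part of this change):

from tempest.api.compute import base
from tempest import config
from tempest.lib import decorators

CONF = config.CONF


class XenOnlyMetadataSample(base.BaseV2ComputeTest):
    """Placeholder class illustrating a config-driven class-level skip."""

    @classmethod
    def skip_checks(cls):
        # Chain up first so the base class checks still run.
        super(XenOnlyMetadataSample, cls).skip_checks()
        if not CONF.compute_feature_enabled.xenapi_apis:
            raise cls.skipException(
                'Metadata is read-only on non-Xen-based deployments.')

    @decorators.idempotent_id('00000000-0000-0000-0000-000000000000')
    def test_placeholder(self):
        pass

Because the guard lives in skip_checks(), it runs once during class setup instead of being repeated in every test method.
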
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 482ba09..655909c 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -14,12 +14,18 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
+CONF = config.CONF
+
 
 class ServerMetadataNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of server metadata"""
+
+    create_default_network = True
 
     @classmethod
     def setup_clients(cls):
@@ -35,10 +41,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fe114a8f-3a57-4eff-9ee2-4e14628df049')
     def test_server_create_metadata_key_too_long(self):
+        """Test creating server with too long metadata key should fail"""
         # Attempt to start a server with a meta-data key that is > 255
         # characters
 
-        # Tryset_server_metadata_item a few values
+        # Try to create a server with metadata keys of a few sizes
         for sz in [256, 257, 511, 1023]:
             key = "k" * sz
             meta = {key: 'data1'}
@@ -51,7 +58,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('92431555-4d8b-467c-b95b-b17daa5e57ff')
     def test_create_server_metadata_blank_key(self):
-        # Blank key should trigger an error.
+        """Test creating server with blank metadata key should fail"""
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
@@ -60,6 +67,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4d9cd7a3-2010-4b41-b8fe-3bbf0b169466')
     def test_server_metadata_non_existent_server(self):
+        """Test getting metadata item for a non existent server should fail"""
         # GET on a non-existent server should not succeed
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
@@ -70,7 +78,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f408e78e-3066-4097-9299-3b0182da812e')
     def test_list_server_metadata_non_existent_server(self):
-        # List metadata on a non-existent server should not succeed
+        """Test listing metadata for a non existent server should fail"""
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.list_server_metadata,
@@ -78,9 +86,15 @@
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0025fbd6-a4ba-4cde-b8c2-96805dcfdabc')
-    def test_wrong_key_passed_in_body(self):
-        # Raise BadRequest if key in uri does not match
-        # the key passed in body.
+    def test_set_metadata_invalid_key(self):
+        """Test setting server metadata item with wrong key in body
+
+        Raise BadRequest if key in uri does not match the key passed in body.
+        """
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'testkey': 'testvalue'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata_item,
@@ -89,7 +103,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0df38c2a-3d4e-4db5-98d8-d4d9fa843a12')
     def test_set_metadata_non_existent_server(self):
-        # Set metadata on a non-existent server should not succeed
+        """Test setting metadata for a non existent server should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         non_existent_server_id = data_utils.rand_uuid()
         meta = {'meta1': 'data1'}
         self.assertRaises(lib_exc.NotFound,
@@ -100,7 +118,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('904b13dc-0ef2-4e4c-91cd-3b4a0f2f49d8')
     def test_update_metadata_non_existent_server(self):
-        # An update should not happen for a non-existent server
+        """Test updating metadata for a non existent server should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         non_existent_server_id = data_utils.rand_uuid()
         meta = {'key1': 'value1', 'key2': 'value2'}
         self.assertRaises(lib_exc.NotFound,
@@ -111,7 +133,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a452f38c-05c2-4b47-bd44-a4f0bf5a5e48')
     def test_update_metadata_with_blank_key(self):
-        # Blank key should trigger an error
+        """Test updating server metadata to blank key should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_server_metadata,
@@ -120,7 +146,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6bbd88e1-f8b3-424d-ba10-ae21c45ada8d')
     def test_delete_metadata_non_existent_server(self):
-        # Should not be able to delete metadata item from a non-existent server
+        """Test deleting metadata item from a non existent server
+
+        Should not be able to delete metadata item from a non-existent server.
+        """
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_server_metadata_item,
@@ -130,9 +163,15 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d8c0a210-a5c3-4664-be04-69d96746b547')
     def test_metadata_items_limit(self):
-        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
-        # will be raised while exceeding metadata items limit for
-        # tenant.
+        """Test set/update server metadata over limit should fail
+
+        A 403 Forbidden or 413 Overlimit (old behaviour) exception
+        will be raised while exceeding metadata items limit for project.
+        """
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
         quota_metadata = quota_set['metadata_items']
@@ -156,8 +195,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('96100343-7fa9-40d8-80fa-d29ef588ce1c')
     def test_set_server_metadata_blank_key(self):
-        # Raise a bad request error for blank key.
-        # set_server_metadata will replace all metadata with new value
+        """Test setting server metadata with blank key should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata,
@@ -166,8 +208,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('64a91aee-9723-4863-be44-4c9d9f1e7d0e')
     def test_set_server_metadata_missing_metadata(self):
-        # Raise a bad request error for a missing metadata field
-        # set_server_metadata will replace all metadata with new value
+        """Test setting server metadata without metadata field should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'meta1': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata,
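
In this file the same xenapi_apis guard is applied per test method rather than in skip_checks(): only the metadata-mutating negative tests depend on the Xen-only writable-metadata API, while read-only cases such as test_list_server_metadata_non_existent_server keep running on any deployment. The repeated three-line guard could be collapsed into a small helper on the class; a sketch (the helper name _require_writable_metadata is hypothetical, not part of this change, and relies on the CONF already defined in this module):

    def _require_writable_metadata(self):
        # Skip only the current test, not the whole class, when the
        # deployment does not expose writable server metadata.
        if not CONF.compute_feature_enabled.xenapi_apis:
            raise self.skipException(
                'Metadata is read-only on non-Xen-based deployments.')

Each mutating test would then start with self._require_writable_metadata() instead of repeating the if/raise block.
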
diff --git a/tempest/api/compute/servers/test_server_password.py b/tempest/api/compute/servers/test_server_password.py
index 7b31ede..f61d4fd 100644
--- a/tempest/api/compute/servers/test_server_password.py
+++ b/tempest/api/compute/servers/test_server_password.py
@@ -19,6 +19,8 @@
 
 
 class ServerPasswordTestJSON(base.BaseV2ComputeTest):
+    """Test server password"""
+
     create_default_network = True
 
     @classmethod
@@ -28,8 +30,10 @@
 
     @decorators.idempotent_id('f83b582f-62a8-4f22-85b0-0dee50ff783a')
     def test_get_server_password(self):
+        """Test getting password of a server"""
         self.servers_client.show_password(self.server['id'])
 
     @decorators.idempotent_id('f8229e8b-b625-4493-800a-bde86ac611ea')
     def test_delete_server_password(self):
+        """Test deleting password from a server"""
         self.servers_client.delete_password(self.server['id'])
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index 4f484e2..8a05e7a 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -28,6 +28,8 @@
 
 
 class ServerPersonalityTestJSON(base.BaseV2ComputeTest):
+    """Test servers with injected files"""
+    max_microversion = '2.56'
 
     @classmethod
     def setup_credentials(cls):
@@ -51,6 +53,7 @@
     @decorators.attr(type='slow')
     @decorators.idempotent_id('3cfe87fd-115b-4a02-b942-7dc36a337fdf')
     def test_create_server_with_personality(self):
+        """Test creating server with file injection"""
         file_contents = 'This is a test file.'
         file_path = '/test.txt'
         personality = [{'path': file_path,
@@ -85,6 +88,7 @@
     @decorators.attr(type='slow')
     @decorators.idempotent_id('128966d8-71fc-443c-8cab-08e24114ecc9')
     def test_rebuild_server_with_personality(self):
+        """Test injecting file when rebuilding server"""
         validation_resources = self.get_test_validation_resources(
             self.os_primary)
         server = self.create_test_server(
@@ -107,8 +111,11 @@
 
     @decorators.idempotent_id('176cd8c9-b9e8-48ee-a480-180beab292bf')
     def test_personality_files_exceed_limit(self):
-        # Server creation should fail if greater than the maximum allowed
-        # number of files are injected into the server.
+        """Test creating server with injected files over limitation
+
+        Server creation should fail if greater than the maximum allowed
+        number of files are injected into the server.
+        """
         file_contents = 'This is a test file.'
         personality = []
         limits = self.limits_client.show_limits()['limits']
@@ -131,8 +138,11 @@
     @decorators.attr(type='slow')
     @decorators.idempotent_id('52f12ee8-5180-40cc-b417-31572ea3d555')
     def test_can_create_server_with_max_number_personality_files(self):
-        # Server should be created successfully if maximum allowed number of
-        # files is injected into the server during creation.
+        """Test creating server with maximum allowed number of injected files
+
+        Server should be created successfully if maximum allowed number of
+        files is injected into the server during creation.
+        """
         file_contents = 'This is a test file.'
         limits = self.limits_client.show_limits()['limits']
         max_file_limit = limits['absolute']['maxPersonality']
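
The new max_microversion = '2.56' cap reflects that the personality (file injection) parameter was removed from server create and rebuild starting with compute microversion 2.57, so these tests are only meaningful below that boundary. The API also expects each injected file's contents to be base64-encoded text, which is why the unencoded variant in test_servers_negative.py further down expects BadRequest. A small standard-library sketch of building one personality entry (illustrative only; values mirror the tests above):

import base64

# The compute API expects base64-encoded text for injected file contents.
file_contents = 'This is a test file.'
encoded = base64.b64encode(file_contents.encode('utf-8')).decode('ascii')
personality = [{'path': '/test.txt', 'contents': encoded}]
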
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 6629794..354e3b9 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -25,6 +26,7 @@
 
 
 class ServerRescueTestBase(base.BaseV2ComputeTest):
+    create_default_network = True
 
     @classmethod
     def skip_checks(cls):
@@ -52,9 +54,11 @@
 
 
 class ServerRescueTestJSON(ServerRescueTestBase):
+    """Test server rescue"""
 
     @decorators.idempotent_id('fd032140-714c-42e4-a8fd-adcd8df06be6')
     def test_rescue_unrescue_instance(self):
+        """Test rescue/unrescue server"""
         password = data_utils.rand_password()
         server = self.create_test_server(adminPass=password,
                                          wait_until='ACTIVE')
@@ -67,6 +71,7 @@
 
 
 class ServerRescueTestJSONUnderV235(ServerRescueTestBase):
+    """Test server rescue with compute microversion less than 2.36"""
 
     max_microversion = '2.35'
 
@@ -80,7 +85,7 @@
     @testtools.skipUnless(CONF.network_feature_enabled.floating_ips,
                           "Floating ips are not available")
     def test_rescued_vm_associate_dissociate_floating_ip(self):
-        # Association of floating IP to a rescued vm
+        """Test associate/dissociate floating ip for rescued server"""
         floating_ip_body = self.floating_ips_client.create_floating_ip(
             pool=CONF.network.floating_network_name)['floating_ip']
         self.addCleanup(self.floating_ips_client.delete_floating_ip,
@@ -95,6 +100,7 @@
 
     @decorators.idempotent_id('affca41f-7195-492d-8065-e09eee245404')
     def test_rescued_vm_add_remove_security_group(self):
+        """Test add/remove security group to for rescued server"""
         # Add Security group
         sg = self.create_security_group()
         self.servers_client.add_security_group(self.rescued_server_id,
@@ -103,3 +109,165 @@
         # Delete Security group
         self.servers_client.remove_security_group(self.rescued_server_id,
                                                   name=sg['name'])
+
+
+class BaseServerStableDeviceRescueTest(base.BaseV2ComputeTest):
+    create_default_network = True
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseServerStableDeviceRescueTest, cls).skip_checks()
+        if not CONF.compute_feature_enabled.rescue:
+            msg = "Server rescue not available."
+            raise cls.skipException(msg)
+        if not CONF.compute_feature_enabled.stable_rescue:
+            msg = "Stable rescue not available."
+            raise cls.skipException(msg)
+
+    def _create_server_and_rescue_image(self, hw_rescue_device=None,
+                                        hw_rescue_bus=None,
+                                        block_device_mapping_v2=None):
+
+        server_id = self.create_test_server(
+            wait_until='ACTIVE')['id']
+        image_id = self.create_image_from_server(
+            server_id, wait_until='ACTIVE')['id']
+
+        if block_device_mapping_v2:
+            server_id = self.create_test_server(
+                wait_until='ACTIVE',
+                block_device_mapping_v2=block_device_mapping_v2)['id']
+
+        if hw_rescue_bus:
+            self.images_client.update_image(
+                image_id, [dict(add='/hw_rescue_bus',
+                                value=hw_rescue_bus)])
+        if hw_rescue_device:
+            self.images_client.update_image(
+                image_id, [dict(add='/hw_rescue_device',
+                                value=hw_rescue_device)])
+        return server_id, image_id
+
+    def _test_stable_device_rescue(self, server_id, rescue_image_id):
+        self.servers_client.rescue_server(
+            server_id, rescue_image_ref=rescue_image_id)
+        waiters.wait_for_server_status(
+            self.servers_client, server_id, 'RESCUE')
+        self.servers_client.unrescue_server(server_id)
+        waiters.wait_for_server_status(
+            self.servers_client, server_id, 'ACTIVE')
+
+
+class ServerStableDeviceRescueTestIDE(BaseServerStableDeviceRescueTest):
+    """Test rescuing server using an IDE device for the rescue disk"""
+
+    @classmethod
+    def skip_checks(cls):
+        super().skip_checks()
+        if not CONF.compute_feature_enabled.ide_bus:
+            raise cls.skipException("IDE bus not available.")
+
+    @decorators.idempotent_id('947004c3-e8ef-47d9-9f00-97b74f9eaf96')
+    @testtools.skipIf("aarch64" in CONF.scenario.img_file,
+                      "Aarch64 does not support ide bus for cdrom")
+    def test_stable_device_rescue_cdrom_ide(self):
+        """Test rescuing server with cdrom and ide as the rescue disk"""
+        server_id, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='cdrom', hw_rescue_bus='ide')
+        self._test_stable_device_rescue(server_id, rescue_image_id)
+
+
+class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
+    """Test rescuing server specifying type of device for the rescue disk"""
+
+    @decorators.idempotent_id('16865750-1417-4854-bcf7-496e6753c01e')
+    def test_stable_device_rescue_disk_virtio(self):
+        """Test rescuing server with disk and virtio as the rescue disk"""
+        server_id, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='disk', hw_rescue_bus='virtio')
+        self._test_stable_device_rescue(server_id, rescue_image_id)
+
+    @decorators.idempotent_id('12340157-6306-4745-bdda-cfa019908b48')
+    def test_stable_device_rescue_disk_scsi(self):
+        """Test rescuing server with disk and scsi as the rescue disk"""
+        server_id, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='disk', hw_rescue_bus='scsi')
+        self._test_stable_device_rescue(server_id, rescue_image_id)
+
+    @decorators.idempotent_id('647d04cf-ad35-4956-89ab-b05c5c16f30c')
+    def test_stable_device_rescue_disk_usb(self):
+        """Test rescuing server with disk and usb as the rescue disk"""
+        server_id, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='disk', hw_rescue_bus='usb')
+        self._test_stable_device_rescue(server_id, rescue_image_id)
+
+    @decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
+    @utils.services('volume')
+    def test_stable_device_rescue_disk_virtio_with_volume_attached(self):
+        """Test rescuing server with volume attached
+
+        Attach a volume to the server and then rescue the server with disk
+        and virtio as the rescue disk.
+        """
+        server_id, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='disk', hw_rescue_bus='virtio')
+        server = self.servers_client.show_server(server_id)['server']
+        volume = self.create_volume()
+        self.attach_volume(server, volume)
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')
+        self._test_stable_device_rescue(server_id, rescue_image_id)
+
+
+class ServerBootFromVolumeStableRescueTest(BaseServerStableDeviceRescueTest):
+    """Test rescuing server specifying type of device for the rescue disk
+
+    Test rescuing server specifying type of device for the rescue disk with
+    compute microversion greater than 2.86.
+    """
+
+    min_microversion = '2.87'
+
+    @classmethod
+    def skip_checks(cls):
+        super(ServerBootFromVolumeStableRescueTest, cls).skip_checks()
+        if not CONF.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('48f123cb-922a-4065-8db6-b9a9074a556b')
+    def test_stable_device_rescue_bfv_blank_volume(self):
+        """Test rescuing server with blank volume as block_device_mapping_v2
+
+        Create a server with block_device_mapping_v2 with blank volume,
+        then rescue the server with disk and virtio as the rescue disk.
+        """
+        block_device_mapping_v2 = [{
+            "boot_index": "0",
+            "source_type": "blank",
+            "volume_size": CONF.volume.volume_size,
+            "destination_type": "volume"}]
+        server_id, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='disk', hw_rescue_bus='virtio',
+            block_device_mapping_v2=block_device_mapping_v2)
+        self._test_stable_device_rescue(server_id, rescue_image_id)
+
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('e4636333-c928-40fc-98b7-70a23eef4224')
+    def test_stable_device_rescue_bfv_image_volume(self):
+        """Test rescuing server with blank volume as block_device_mapping_v2
+
+        Create a server with block_device_mapping_v2 with image volume,
+        then rescue the server with disk and virtio as the rescue disk.
+        """
+        block_device_mapping_v2 = [{
+            "boot_index": "0",
+            "source_type": "image",
+            "volume_size": CONF.volume.volume_size,
+            "uuid": CONF.compute.image_ref,
+            "destination_type": "volume"}]
+        server_id, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='disk', hw_rescue_bus='virtio',
+            block_device_mapping_v2=block_device_mapping_v2)
+        self._test_stable_device_rescue(server_id, rescue_image_id)
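
The two boot-from-volume stable-rescue tests added above differ only in the block_device_mapping_v2 entry handed to _create_server_and_rescue_image. A field-by-field restatement for readability (values taken from the tests; CONF as already defined in this module; not additional test code):

# blank volume: nova creates an empty volume of the configured size and
# attaches it as the root device
bdm_blank = [{
    'boot_index': '0',
    'source_type': 'blank',
    'volume_size': CONF.volume.volume_size,
    'destination_type': 'volume',
}]

# image volume: the root volume is populated from the configured image
bdm_image = [{
    'boot_index': '0',
    'source_type': 'image',
    'uuid': CONF.compute.image_ref,
    'volume_size': CONF.volume.volume_size,
    'destination_type': 'volume',
}]
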
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index caceb64..9bcf062 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -27,6 +27,7 @@
 
 
 class ServerRescueNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of server rescue"""
 
     @classmethod
     def skip_checks(cls):
@@ -75,7 +76,7 @@
                           'Pause is not available.')
     @decorators.attr(type=['negative'])
     def test_rescue_paused_instance(self):
-        # Rescue a paused server
+        """Test rescuing a paused server should fail"""
         self.servers_client.pause_server(self.server_id)
         self.addCleanup(self._unpause, self.server_id)
         waiters.wait_for_server_status(self.servers_client,
@@ -87,13 +88,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('db22b618-f157-4566-a317-1b6d467a8094')
     def test_rescued_vm_reboot(self):
+        """Test rebooing a rescued server should fail"""
         self.assertRaises(lib_exc.Conflict, self.servers_client.reboot_server,
                           self.rescue_id, type='HARD')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6dfc0a55-3a77-4564-a144-1587b7971dde')
     def test_rescue_non_existent_server(self):
-        # Rescue a non-existing server
+        """Test rescuing a non-existing server should fail"""
         non_existent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.servers_client.rescue_server,
@@ -102,6 +104,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('70cdb8a1-89f8-437d-9448-8844fd82bf46')
     def test_rescued_vm_rebuild(self):
+        """Test rebuilding a rescued server should fail"""
         self.assertRaises(lib_exc.Conflict,
                           self.servers_client.rebuild_server,
                           self.rescue_id,
@@ -111,6 +114,7 @@
     @utils.services('volume')
     @decorators.attr(type=['negative'])
     def test_rescued_vm_attach_volume(self):
+        """Test attaching volume to a rescued server should fail"""
         volume = self.create_volume()
 
         # Rescue the server
@@ -130,6 +134,7 @@
     @utils.services('volume')
     @decorators.attr(type=['negative'])
     def test_rescued_vm_detach_volume(self):
+        """Test detaching volume from a rescued server should fail"""
         volume = self.create_volume()
 
         # Attach the volume to the server
diff --git a/tempest/api/compute/servers/test_server_tags.py b/tempest/api/compute/servers/test_server_tags.py
index 3893b01..c988788 100644
--- a/tempest/api/compute/servers/test_server_tags.py
+++ b/tempest/api/compute/servers/test_server_tags.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
-
 from tempest.api.compute import base
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
@@ -22,6 +20,7 @@
 
 
 class ServerTagsTestJSON(base.BaseV2ComputeTest):
+    """Test server tags with compute microversion greater than 2.25"""
 
     min_microversion = '2.26'
     max_microversion = 'latest'
@@ -54,6 +53,7 @@
 
     @decorators.idempotent_id('8d95abe2-c658-4c42-9a44-c0258500306b')
     def test_create_delete_tag(self):
+        """Test creating and deleting server tag"""
         # Check that no tags exist.
         fetched_tags = self.client.list_tags(self.server['id'])['tags']
         self.assertEmpty(fetched_tags)
@@ -73,6 +73,7 @@
 
     @decorators.idempotent_id('a2c1af8c-127d-417d-974b-8115f7e3d831')
     def test_update_all_tags(self):
+        """Test updating all server tags"""
         # Add server tags to the server.
         tags = [data_utils.rand_name('tag'), data_utils.rand_name('tag')]
         self._update_server_tags(self.server['id'], tags)
@@ -81,14 +82,15 @@
         new_tags = [data_utils.rand_name('tag'), data_utils.rand_name('tag')]
         replaced_tags = self.client.update_all_tags(
             self.server['id'], new_tags)['tags']
-        six.assertCountEqual(self, new_tags, replaced_tags)
+        self.assertCountEqual(new_tags, replaced_tags)
 
         # List the tags and check that the tags were replaced.
         fetched_tags = self.client.list_tags(self.server['id'])['tags']
-        six.assertCountEqual(self, new_tags, fetched_tags)
+        self.assertCountEqual(new_tags, fetched_tags)
 
     @decorators.idempotent_id('a63b2a74-e918-4b7c-bcab-10c855f3a57e')
     def test_delete_all_tags(self):
+        """Test deleting all server tags"""
         # Add server tags to the server.
         assigned_tags = [data_utils.rand_name('tag'),
                          data_utils.rand_name('tag')]
@@ -101,6 +103,7 @@
 
     @decorators.idempotent_id('81279a66-61c3-4759-b830-a2dbe64cbe08')
     def test_check_tag_existence(self):
+        """Test checking server tag existence"""
         # Add server tag to the server.
         assigned_tag = data_utils.rand_name('tag')
         self._update_server_tags(self.server['id'], assigned_tag)
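
Dropping six here relies on unittest.TestCase.assertCountEqual, the Python 3 name for the order-insensitive sequence comparison that six.assertCountEqual(self, ...) used to wrap. A standalone illustration of its behaviour (plain unittest, runnable on its own):

import unittest


class AssertCountEqualExample(unittest.TestCase):

    def test_order_is_ignored(self):
        # Same elements with the same multiplicities pass, order ignored.
        self.assertCountEqual(['tag-b', 'tag-a'], ['tag-a', 'tag-b'])

    def test_multiplicity_is_checked(self):
        # Different element counts raise AssertionError.
        with self.assertRaises(AssertionError):
            self.assertCountEqual(['tag-a', 'tag-a'], ['tag-a'])


if __name__ == '__main__':
    unittest.main()
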
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 3a4bd6d..1c839eb 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -25,6 +25,7 @@
 
 
 class ServersTestJSON(base.BaseV2ComputeTest):
+    """Test servers API"""
     create_default_network = True
 
     @classmethod
@@ -37,8 +38,11 @@
                           enable_instance_password,
                           'Instance password not available.')
     def test_create_server_with_admin_password(self):
-        # If an admin password is provided on server creation, the server's
-        # root password should be set to that password.
+        """Test creating server with admin password
+
+        If an admin password is provided on server creation, the server's
+        root password should be set to that password.
+        """
         server = self.create_test_server(adminPass='testpassword')
         self.addCleanup(self.delete_server, server['id'])
 
@@ -47,8 +51,7 @@
 
     @decorators.idempotent_id('8fea6be7-065e-47cf-89b8-496e6f96c699')
     def test_create_with_existing_server_name(self):
-        # Creating a server with a name that already exists is allowed
-
+        """Test creating a server with already existing name is allowed"""
         # TODO(sdague): clear out try, we do cleanup one layer up
         server_name = data_utils.rand_name(
             self.__class__.__name__ + '-server')
@@ -69,8 +72,7 @@
 
     @decorators.idempotent_id('f9e15296-d7f9-4e62-b53f-a04e89160833')
     def test_create_specify_keypair(self):
-        # Specify a keypair while creating a server
-
+        """Test creating server with keypair"""
         key_name = data_utils.rand_name('key')
         self.keypairs_client.create_keypair(name=key_name)
         self.addCleanup(self.keypairs_client.delete_keypair, key_name)
@@ -97,11 +99,11 @@
 
     @decorators.idempotent_id('5e6ccff8-349d-4852-a8b3-055df7988dd2')
     def test_update_server_name(self):
-        # The server name should be changed to the provided value
+        """Test updating server name to the provided value"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.delete_server, server['id'])
         # Update instance name with non-ASCII characters
-        prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
+        prefix_name = '\u00CD\u00F1st\u00E1\u00F1c\u00E9'
         self._update_server_name(server['id'], 'ACTIVE', prefix_name)
 
         # stop server and check server name update again
@@ -115,7 +117,7 @@
 
     @decorators.idempotent_id('89b90870-bc13-4b73-96af-f9d4f2b70077')
     def test_update_access_server_address(self):
-        # The server's access addresses should reflect the provided values
+        """Test updating server's access addresses to the provided value"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.delete_server, server['id'])
 
@@ -132,7 +134,7 @@
 
     @decorators.idempotent_id('38fb1d02-c3c5-41de-91d3-9bc2025a75eb')
     def test_create_server_with_ipv6_addr_only(self):
-        # Create a server without an IPv4 address(only IPv6 address).
+        """Test creating server with ipv6 address only(no ipv4 address)"""
         server = self.create_test_server(accessIPv6='2001:2001::3',
                                          wait_until='ACTIVE')
         self.addCleanup(self.delete_server, server['id'])
@@ -142,17 +144,22 @@
     @decorators.related_bug('1730756')
     @decorators.idempotent_id('defbaca5-d611-49f5-ae21-56ee25d2db49')
     def test_create_server_specify_multibyte_character_name(self):
-        # prefix character is:
-        # http://unicode.org/cldr/utility/character.jsp?a=20A1
+        """Test creating server with multi character name
 
-        # We use a string with 3 byte utf-8 character due to nova
-        # will return 400(Bad Request) if we attempt to send a name which has
-        # 4 byte utf-8 character.
+        prefix character is:
+        http://unicode.org/cldr/utility/character.jsp?a=20A1
+
+        We use a string with a 3-byte utf-8 character because nova
+        returns 400 (Bad Request) if we attempt to send a name which
+        contains a 4-byte utf-8 character.
+        """
         utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
         self.create_test_server(name=utf8_name, wait_until='ACTIVE')
 
 
 class ServerShowV247Test(base.BaseV2ComputeTest):
+    """Test servers API with compute microversion greater than 2.46"""
+
     min_microversion = '2.47'
     max_microversion = 'latest'
 
@@ -164,12 +171,14 @@
 
     @decorators.idempotent_id('88b0bdb2-494c-11e7-a919-92ebcb67fe33')
     def test_show_server(self):
+        """Test getting server detail"""
         server = self.create_test_server()
         # All fields will be checked by API schema
         self.servers_client.show_server(server['id'])
 
     @decorators.idempotent_id('8de397c2-57d0-4b90-aa30-e5d668f21a8b')
     def test_update_rebuild_list_server(self):
+        """Test update/rebuild/list server"""
         server = self.create_test_server()
         # Checking update API response schema
         self.servers_client.update_server(server['id'])
@@ -184,6 +193,8 @@
 
 
 class ServerShowV263Test(base.BaseV2ComputeTest):
+    """Test servers API with compute microversion greater than 2.62"""
+
     min_microversion = '2.63'
     max_microversion = 'latest'
 
@@ -195,6 +206,7 @@
                           'required to test image certificate validation.')
     @decorators.idempotent_id('71b8e3d5-11d2-494f-b917-b094a4afed3c')
     def test_show_update_rebuild_list_server(self):
+        """Test show/update/rebuild/list server"""
         trusted_certs = CONF.compute.certified_image_trusted_certs
         server = self.create_test_server(
             image_id=CONF.compute.certified_image_ref,
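
The reworked docstring for test_create_server_specify_multibyte_character_name turns on UTF-8 byte lengths: U+20A1 (the b'\xe2\x82\xa1' literal above) encodes to 3 bytes, while characters outside the Basic Multilingual Plane encode to 4 bytes and are the ones the docstring says nova rejects with 400. A quick standalone check:

# UTF-8 byte lengths behind the multibyte-name docstring above.
three_byte = b'\xe2\x82\xa1'.decode('utf-8')  # U+20A1, as used in the test
four_byte = '\U0001F600'                      # emoji outside the BMP

assert len(three_byte.encode('utf-8')) == 3
assert len(four_byte.encode('utf-8')) == 4
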
diff --git a/tempest/api/compute/servers/test_servers_microversions.py b/tempest/api/compute/servers/test_servers_microversions.py
index 2434884..566d04a 100644
--- a/tempest/api/compute/servers/test_servers_microversions.py
+++ b/tempest/api/compute/servers/test_servers_microversions.py
@@ -32,11 +32,13 @@
 
 
 class ServerShowV254Test(base.BaseV2ComputeTest):
+    """Test servers API schema for compute microversion greater than 2.53"""
     min_microversion = '2.54'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('09170a98-4940-4637-add7-1a35121f1a5a')
     def test_rebuild_server(self):
+        """Test rebuilding server with microversion greater than 2.53"""
         server = self.create_test_server(wait_until='ACTIVE')
         keypair_name = data_utils.rand_name(
             self.__class__.__name__ + '-keypair')
@@ -52,11 +54,13 @@
 
 
 class ServerShowV257Test(base.BaseV2ComputeTest):
+    """Test servers API schema for compute microversion greater than 2.56"""
     min_microversion = '2.57'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('803df848-080a-4261-8f11-b020cd9b6f60')
     def test_rebuild_server(self):
+        """Test rebuilding server with microversion greater than 2.56"""
         server = self.create_test_server(wait_until='ACTIVE')
         user_data = "ZWNobyAiaGVsbG8gd29ybGQi"
         # Checking rebuild API response schema
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 7fa30b0..4f85048 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -30,6 +30,8 @@
 
 
 class ServersNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of servers"""
+
     create_default_network = True
 
     def setUp(self):
@@ -58,7 +60,8 @@
         server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
 
-        server = cls.create_test_server()
+        # Wait until the instance is active before deleting, to avoid a race
+        server = cls.create_test_server(wait_until='ACTIVE')
         cls.client.delete_server(server['id'])
         waiters.wait_for_server_termination(cls.client, server['id'])
         cls.deleted_server_id = server['id']
@@ -66,8 +69,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf')
     def test_server_name_blank(self):
-        # Create a server with name parameter empty
-
+        """Creating a server with name parameter empty should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
                           name='')
@@ -77,8 +79,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.personality,
                           'Nova personality feature disabled')
     def test_personality_file_contents_not_encoded(self):
-        # Use an unencoded file when creating a server with personality
-
+        """Using an unencoded injected file to create server should fail"""
         file_contents = 'This is a test file.'
         person = [{'path': '/etc/testfile.txt',
                    'contents': file_contents}]
@@ -90,8 +91,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fcba1052-0a50-4cf3-b1ac-fae241edf02f')
     def test_create_with_invalid_image(self):
-        # Create a server with an unknown image
-
+        """Creating a server with an unknown image should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
                           image_id=-1)
@@ -99,8 +99,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('18f5227f-d155-4429-807c-ccb103887537')
     def test_create_with_invalid_flavor(self):
-        # Create a server with an unknown flavor
-
+        """Creating a server with an unknown flavor should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
                           flavor=-1,)
@@ -108,8 +107,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7f70a4d1-608f-4794-9e56-cb182765972c')
     def test_invalid_access_ip_v4_address(self):
-        # An access IPv4 address must match a valid address pattern
+        """Creating a server with invalid ipv4 ip address should fail
 
+        An access IPv4 address must match a valid address pattern
+        """
         IPv4 = '1.1.1.1.1.1'
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server, accessIPv4=IPv4)
@@ -117,8 +118,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0')
     def test_invalid_ip_v6_address(self):
-        # An access IPv6 address must match a valid address pattern
+        """Creating a server with invalid ipv6 ip address should fail
 
+        An access IPv6 address must match a valid address pattern
+        """
         IPv6 = 'notvalid'
 
         self.assertRaises(lib_exc.BadRequest,
@@ -129,7 +132,7 @@
                           'Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_nonexistent_server(self):
-        # Resize a non-existent server
+        """Resizing a non-existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.resize_server,
@@ -140,7 +143,7 @@
                           'Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_with_non_existent_flavor(self):
-        # Resize a server with non-existent flavor
+        """Resizing a server with non existent flavor should fail"""
         nonexistent_flavor = data_utils.rand_uuid()
         self.assertRaises(lib_exc.BadRequest, self.client.resize_server,
                           self.server_id, flavor_ref=nonexistent_flavor)
@@ -150,14 +153,14 @@
                           'Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_with_null_flavor(self):
-        # Resize a server with null flavor
+        """Resizing a server with null flavor should fail"""
         self.assertRaises(lib_exc.BadRequest, self.client.resize_server,
                           self.server_id, flavor_ref="")
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d4c023a0-9c55-4747-9dd5-413b820143c7')
     def test_reboot_non_existent_server(self):
-        # Reboot a non existent server
+        """Rebooting a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.reboot_server,
                           nonexistent_server, type='SOFT')
@@ -167,7 +170,7 @@
                           'Pause is not available.')
     @decorators.attr(type=['negative'])
     def test_pause_paused_server(self):
-        # Pause a paused server.
+        """Pausing a paused server should fail"""
         self.client.pause_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
         self.assertRaises(lib_exc.Conflict,
@@ -178,7 +181,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('98fa0458-1485-440f-873b-fe7f0d714930')
     def test_rebuild_deleted_server(self):
-        # Rebuild a deleted server
+        """Rebuilding a deleted server should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.rebuild_server,
                           self.deleted_server_id, self.image_ref)
@@ -187,14 +190,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('581a397d-5eab-486f-9cf9-1014bbd4c984')
     def test_reboot_deleted_server(self):
-        # Reboot a deleted server
+        """Rebooting a deleted server should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.reboot_server,
                           self.deleted_server_id, type='SOFT')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d86141a7-906e-4731-b187-d64a2ea61422')
     def test_rebuild_non_existent_server(self):
-        # Rebuild a non existent server
+        """Rebuilding a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.rebuild_server,
@@ -204,6 +207,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fd57f159-68d6-4c2a-902b-03070828a87e')
     def test_create_numeric_server_name(self):
+        """Creating a server with numeric server name should fail"""
         server_name = 12345
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
@@ -212,8 +216,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c3e0fb12-07fc-4d76-a22e-37409887afe8')
     def test_create_server_name_length_exceeds_256(self):
-        # Create a server with name length exceeding 255 characters
+        """Creating a server with name length exceeding limit should fail
 
+        Create a server with name length exceeding 255 characters, an error is
+        returned.
+        """
         server_name = 'a' * 256
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
@@ -224,6 +231,11 @@
     @utils.services('volume')
     @decorators.idempotent_id('12146ac1-d7df-4928-ad25-b1f99e5286cd')
     def test_create_server_invalid_bdm_in_2nd_dict(self):
+        """Creating a server with invalid block_device_mapping_v2 should fail
+
+        Create a server with invalid block_device_mapping_v2, an error is
+        returned.
+        """
         volume = self.create_volume()
         bdm_1st = {"source_type": "image",
                    "delete_on_termination": True,
@@ -243,10 +255,9 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4e72dc2d-44c5-4336-9667-f7972e95c402')
     def test_create_with_invalid_network_uuid(self):
+        """Creating a server with invalid network uuid should fail"""
         # Pass invalid network uuid while creating a server
-
         networks = [{'fixed_ip': '10.0.1.1', 'uuid': 'a-b-c-d-e-f-g-h-i-j'}]
-
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
                           networks=networks)
@@ -254,8 +265,8 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7a2efc39-530c-47de-b875-2dd01c8d39bd')
     def test_create_with_non_existent_keypair(self):
+        """Creating a server with non-existent keypair should fail"""
         # Pass a non-existent keypair while creating a server
-
         key_name = data_utils.rand_name('key')
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
@@ -264,8 +275,8 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7fc74810-0bd2-4cd7-8244-4f33a9db865a')
     def test_create_server_metadata_exceeds_length_limit(self):
+        """Creating a server with metadata longer than limit should fail """
         # Pass really long metadata while creating a server
-
         metadata = {'a': 'b' * 260}
         self.assertRaises((lib_exc.BadRequest, lib_exc.OverLimit),
                           self.create_test_server,
@@ -274,8 +285,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('aa8eed43-e2cb-4ebf-930b-da14f6a21d81')
     def test_update_name_of_non_existent_server(self):
-        # Update name of a non-existent server
-
+        """Updating name of a non-existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         new_name = data_utils.rand_name(
             self.__class__.__name__ + '-server') + '_updated'
@@ -286,18 +296,19 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('38204696-17c6-44da-9590-40f87fb5a899')
     def test_update_server_set_empty_name(self):
-        # Update name of the server to an empty string
-
+        """Updating name of the server to an empty string should fail"""
         new_name = ''
-
         self.assertRaises(lib_exc.BadRequest, self.client.update_server,
                           self.server_id, name=new_name)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5c8e244c-dada-4590-9944-749c455b431f')
     def test_update_server_name_length_exceeds_256(self):
-        # Update name of server exceed the name length limit
+        """Updating name of server exceeding the name length limit should fail
 
+        Update name of server exceeding the name length limit, an error is
+        returned.
+        """
         new_name = 'a' * 256
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_server,
@@ -307,8 +318,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1041b4e6-514b-4855-96a5-e974b60870a3')
     def test_delete_non_existent_server(self):
-        # Delete a non existent server
-
+        """Deleting a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.delete_server,
                           nonexistent_server)
@@ -316,23 +326,24 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('75f79124-277c-45e6-a373-a1d6803f4cc4')
     def test_delete_server_pass_negative_id(self):
-        # Pass an invalid string parameter to delete server
-
+        """Passing an invalid string parameter to delete server should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_server, -1)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5')
     def test_delete_server_pass_id_exceeding_length_limit(self):
-        # Pass a server ID that exceeds length limit to delete server
+        """Deleting server with a server ID exceeding length limit should fail
 
+        Pass a server ID that exceeds length limit to delete server, an error
+        is returned.
+        """
         self.assertRaises(lib_exc.NotFound, self.client.delete_server,
                           sys.maxsize + 1)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c5fa6041-80cd-483b-aa6d-4e45f19d093c')
     def test_create_with_nonexistent_security_group(self):
-        # Create a server with a nonexistent security group
-
+        """Creating a server with a nonexistent security group should fail"""
         security_groups = [{'name': 'does_not_exist'}]
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
@@ -341,7 +352,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3436b02f-1b1e-4f03-881e-c6a602327439')
     def test_get_non_existent_server(self):
-        # Get a non existent server details
+        """Getting a non existent server details should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.show_server,
                           nonexistent_server)
@@ -349,7 +360,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a31460a9-49e1-42aa-82ee-06e0bb7c2d03')
     def test_stop_non_existent_server(self):
-        # Stop a non existent server
+        """Stopping a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.servers_client.stop_server,
                           nonexistent_server)
@@ -359,7 +370,7 @@
                           'Pause is not available.')
     @decorators.attr(type=['negative'])
     def test_pause_non_existent_server(self):
-        # pause a non existent server
+        """Pausing a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.pause_server,
                           nonexistent_server)
@@ -369,7 +380,7 @@
                           'Pause is not available.')
     @decorators.attr(type=['negative'])
     def test_unpause_non_existent_server(self):
-        # unpause a non existent server
+        """Unpausing a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.unpause_server,
                           nonexistent_server)
@@ -379,7 +390,7 @@
                           'Pause is not available.')
     @decorators.attr(type=['negative'])
     def test_unpause_server_invalid_state(self):
-        # unpause an active server.
+        """Unpausing an active server should fail"""
         self.assertRaises(lib_exc.Conflict,
                           self.client.unpause_server,
                           self.server_id)
@@ -389,7 +400,7 @@
                           'Suspend is not available.')
     @decorators.attr(type=['negative'])
     def test_suspend_non_existent_server(self):
-        # suspend a non existent server
+        """Suspending a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.suspend_server,
                           nonexistent_server)
@@ -399,7 +410,7 @@
                           'Suspend is not available.')
     @decorators.attr(type=['negative'])
     def test_suspend_server_invalid_state(self):
-        # suspend a suspended server.
+        """Suspending a suspended server should fail"""
         self.client.suspend_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'SUSPENDED')
@@ -413,7 +424,7 @@
                           'Suspend is not available.')
     @decorators.attr(type=['negative'])
     def test_resume_non_existent_server(self):
-        # resume a non existent server
+        """Resuming a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.resume_server,
                           nonexistent_server)
@@ -423,7 +434,7 @@
                           'Suspend is not available.')
     @decorators.attr(type=['negative'])
     def test_resume_server_invalid_state(self):
-        # resume an active server.
+        """Resuming an active server should fail"""
         self.assertRaises(lib_exc.Conflict,
                           self.client.resume_server,
                           self.server_id)
@@ -431,7 +442,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7dd919e7-413f-4198-bebb-35e2a01b13e9')
     def test_get_console_output_of_non_existent_server(self):
-        # get the console output for a non existent server
+        """Getting the console output for a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.get_console_output,
@@ -440,7 +451,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6f47992b-5144-4250-9f8b-f00aa33950f3')
     def test_force_delete_nonexistent_server_id(self):
-        # force-delete a non existent server
+        """Force-deleting a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.force_delete_server,
@@ -449,7 +460,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9c6d38cc-fcfb-437a-85b9-7b788af8bf01')
     def test_restore_nonexistent_server_id(self):
-        # restore-delete a non existent server
+        """Restore-deleting a non existent server should fail
+
+        We can restore a soft deleted server, but can't restore a non
+        existent server.
+        """
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.restore_soft_deleted_server,
@@ -458,7 +473,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7fcadfab-bd6a-4753-8db7-4a51e51aade9')
     def test_restore_server_invalid_state(self):
-        # we can only restore-delete a server in 'soft-delete' state
+        """Restore-deleting a server not in 'soft-delete' state should fail
+
+        We can restore a soft deleted server, but can't restore a server that
+        is not in 'soft-delete' state.
+        """
         self.assertRaises(lib_exc.Conflict,
                           self.client.restore_soft_deleted_server,
                           self.server_id)
@@ -468,7 +487,7 @@
                           'Shelve is not available.')
     @decorators.attr(type=['negative'])
     def test_shelve_non_existent_server(self):
-        # shelve a non existent server
+        """Shelving a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.shelve_server,
                           nonexistent_server)
@@ -478,7 +497,7 @@
                           'Shelve is not available.')
     @decorators.attr(type=['negative'])
     def test_shelve_shelved_server(self):
-        # shelve a shelved server.
+        """Shelving a shelved server should fail"""
         compute.shelve_server(self.client, self.server_id)
 
         def _unshelve_server():
@@ -508,7 +527,7 @@
                           'Shelve is not available.')
     @decorators.attr(type=['negative'])
     def test_unshelve_non_existent_server(self):
-        # unshelve a non existent server
+        """Unshelving a non existent server should fail"""
         nonexistent_server = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.unshelve_server,
                           nonexistent_server)
@@ -518,7 +537,7 @@
                           'Shelve is not available.')
     @decorators.attr(type=['negative'])
     def test_unshelve_server_invalid_state(self):
-        # unshelve an active server.
+        """Unshelving an active server should fail"""
         self.assertRaises(lib_exc.Conflict,
                           self.client.unshelve_server,
                           self.server_id)
@@ -527,7 +546,7 @@
     @decorators.idempotent_id('74085be3-a370-4ca2-bc51-2d0e10e0f573')
     @utils.services('volume', 'image')
     def test_create_server_from_non_bootable_volume(self):
-        # Create a volume
+        """Creating a server from a non bootable volume should fail"""
         volume = self.create_volume()
 
         # Update volume bootable status to false
@@ -555,6 +574,8 @@
 
 
 class ServersNegativeTestMultiTenantJSON(base.BaseV2ComputeTest):
+    """Negative tests of servers for multiple projects"""
+
     create_default_network = True
 
     credentials = ['primary', 'alt']
@@ -581,8 +602,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('543d84c1-dd2e-4c6d-8cb2-b9da0efaa384')
     def test_update_server_of_another_tenant(self):
-        # Update name of a server that belongs to another tenant
+        """Updating server that belongs to another project should fail
 
+        Update the name of a server that belongs to another project; an
+        error is returned.
+        """
         new_name = self.server_id + '_new'
         self.assertRaises(lib_exc.NotFound,
                           self.alt_client.update_server, self.server_id,
@@ -591,7 +615,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5c75009d-3eea-423e-bea3-61b09fd25f9c')
     def test_delete_a_server_of_another_tenant(self):
-        # Delete a server that belongs to another tenant
+        """Deleting a server that belongs to another project should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.alt_client.delete_server,
                           self.server_id)
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index dfd6ca4..b2e02c5 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -28,6 +28,8 @@
 # TODO(mriedem): Remove this test class once the nova queens branch goes into
 # extended maintenance mode.
 class VirtualInterfacesTestJSON(base.BaseV2ComputeTest):
+    """Test virtual interfaces API with compute microversion less than 2.44"""
+
     max_microversion = '2.43'
 
     depends_on_nova_network = True
@@ -47,9 +49,7 @@
     @decorators.idempotent_id('96c4e2ef-5e4d-4d7f-87f5-fed6dca18016')
     @utils.services('network')
     def test_list_virtual_interfaces(self):
-        # Positive test:Should be able to GET the virtual interfaces list
-        # for a given server_id
-
+        """Test listing virtual interfaces of a server"""
         if CONF.service_available.neutron:
             with testtools.ExpectedException(exceptions.BadRequest):
                 self.client.list_virtual_interfaces(self.server['id'])
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index f6e8bc9..5667281 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -23,6 +23,12 @@
 # TODO(mriedem): Remove this test class once the nova queens branch goes into
 # extended maintenance mode.
 class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of virtual interfaces API
+
+    Negative tests of virtual interfaces API for compute microversion less
+    than 2.44.
+    """
+
     max_microversion = '2.43'
 
     depends_on_nova_network = True
@@ -37,8 +43,7 @@
     @decorators.idempotent_id('64ebd03c-1089-4306-93fa-60f5eb5c803c')
     @utils.services('network')
     def test_list_virtual_interfaces_invalid_server_id(self):
-        # Negative test: Should not be able to GET virtual interfaces
-        # for an invalid server_id
+        """Test listing virtual interfaces of an invalid server should fail"""
         invalid_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.servers_client.list_virtual_interfaces,
diff --git a/tempest/api/compute/test_extensions.py b/tempest/api/compute/test_extensions.py
index 12e7fea..3318876 100644
--- a/tempest/api/compute/test_extensions.py
+++ b/tempest/api/compute/test_extensions.py
@@ -27,10 +27,11 @@
 
 
 class ExtensionsTest(base.BaseV2ComputeTest):
+    """Tests Compute Extensions API"""
 
     @decorators.idempotent_id('3bb27738-b759-4e0d-a5fa-37d7a6df07d1')
     def test_list_extensions(self):
-        # List of all extensions
+        """Test listing compute extensions"""
         if not CONF.compute_feature_enabled.api_extensions:
             raise self.skipException('There are not any extensions configured')
         extensions = self.extensions_client.list_extensions()['extensions']
@@ -50,6 +51,6 @@
     @decorators.idempotent_id('05762f39-bdfa-4cdb-9b46-b78f8e78e2fd')
     @utils.requires_ext(extension='os-consoles', service='compute')
     def test_get_extension(self):
-        # get the specified extensions
+        """Test getting specified compute extension details"""
         extension = self.extensions_client.show_extension('os-consoles')
         self.assertEqual('os-consoles', extension['extension']['alias'])
diff --git a/tempest/api/compute/test_networks.py b/tempest/api/compute/test_networks.py
index 76131e2..97c26e4 100644
--- a/tempest/api/compute/test_networks.py
+++ b/tempest/api/compute/test_networks.py
@@ -20,6 +20,7 @@
 
 
 class ComputeNetworksTest(base.BaseV2ComputeTest):
+    """Test compute networks API with compute microversion less than 2.36"""
     max_microversion = '2.35'
 
     @classmethod
@@ -35,5 +36,6 @@
 
     @decorators.idempotent_id('3fe07175-312e-49a5-a623-5f52eeada4c2')
     def test_list_networks(self):
+        """Test listing networks using compute networks API"""
         networks = self.client.list_networks()['networks']
         self.assertNotEmpty(networks, "No networks found.")
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index a62492d..5fe0e3b 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -20,6 +20,7 @@
 
 
 class QuotasTestJSON(base.BaseV2ComputeTest):
+    """Test compute quotas"""
 
     @classmethod
     def skip_checks(cls):
@@ -59,7 +60,7 @@
 
     @decorators.idempotent_id('f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107')
     def test_get_quotas(self):
-        # User can get the quota set for it's tenant
+        """Test user can get the compute quota set for it's project"""
         expected_quota_set = self.default_quota_set | set(['id'])
         quota_set = self.client.show_quota_set(self.tenant_id)['quota_set']
         self.assertEqual(quota_set['id'], self.tenant_id)
@@ -75,7 +76,7 @@
 
     @decorators.idempotent_id('9bfecac7-b966-4f47-913f-1a9e2c12134a')
     def test_get_default_quotas(self):
-        # User can get the default quota set for it's tenant
+        """Test user can get the default compute quota set for it's project"""
         expected_quota_set = self.default_quota_set | set(['id'])
         quota_set = (self.client.show_default_quota_set(self.tenant_id)
                      ['quota_set'])
@@ -85,7 +86,7 @@
 
     @decorators.idempotent_id('cd65d997-f7e4-4966-a7e9-d5001b674fdc')
     def test_compare_tenant_quotas_with_default_quotas(self):
-        # Tenants are created with the default quota values
+        """Test tenants are created with the default compute quota values"""
         default_quota_set = \
             self.client.show_default_quota_set(self.tenant_id)['quota_set']
         tenant_quota_set = (self.client.show_quota_set(self.tenant_id)
diff --git a/tempest/api/compute/test_tenant_networks.py b/tempest/api/compute/test_tenant_networks.py
index f4eada0..17f4b80 100644
--- a/tempest/api/compute/test_tenant_networks.py
+++ b/tempest/api/compute/test_tenant_networks.py
@@ -18,6 +18,8 @@
 
 
 class ComputeTenantNetworksTest(base.BaseV2ComputeTest):
+    """Test compute tenant networks API with microversion less than 2.36"""
+
     max_microversion = '2.35'
 
     @classmethod
@@ -34,8 +36,11 @@
     @decorators.idempotent_id('edfea98e-bbe3-4c7a-9739-87b986baff26')
     @utils.services('network')
     def test_list_show_tenant_networks(self):
-        # Fetch all networks that are visible to the tenant: this may include
-        # shared and external networks
+        """Test list/show tenant networks
+
+        Fetch all networks that are visible to the tenant: this may include
+        shared and external networks.
+        """
         tenant_networks = [
             n['id'] for n in self.client.list_tenant_networks()['networks']
         ]
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 97813a5..4c7c234 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -59,13 +59,17 @@
 
 
 class AttachVolumeTestJSON(BaseAttachVolumeTest):
+    """Test attaching volume to server"""
 
     @decorators.idempotent_id('52e9045a-e90d-4c0d-9087-79d657faffff')
     # This test is conditionally marked slow if SSH validation is enabled.
     @decorators.attr(type='slow', condition=CONF.validation.run_validation)
     def test_attach_detach_volume(self):
-        # Stop and Start a server with an attached volume, ensuring that
-        # the volume remains attached.
+        """Test attaching and detaching volume from server
+
+        Stop and Start a server with an attached volume, ensuring that
+        the volume remains attached.
+        """
         server, validation_resources = self._create_server()
 
         # NOTE(andreaf) Create one remote client used throughout the test.
@@ -125,6 +129,13 @@
 
     @decorators.idempotent_id('7fa563fe-f0f7-43eb-9e22-a1ece036b513')
     def test_list_get_volume_attachments(self):
+        """Test listing and getting volume attachments
+
+        First we attach one volume to the server, check listing and getting
+        the volume attachment of the server. Then we attach another volume to
+        the server, check listing and getting the volume attachments of the
+        server. Finally we detach the volumes from the server one by one.
+        """
         # List volume attachment of the server
         server, validation_resources = self._create_server()
         volume_1st = self.create_volume()
@@ -189,6 +200,10 @@
         super(AttachVolumeShelveTestJSON, cls).skip_checks()
         if not CONF.compute_feature_enabled.shelve:
             raise cls.skipException('Shelve is not available.')
+        if CONF.compute.compute_volume_common_az:
+            # assuming cross_az_attach is set to false in nova.conf
+            # per the compute_volume_common_az option description
+            raise cls.skipException('Cross AZ attach not available.')
 
     def _count_volumes(self, server, validation_resources):
         # Count number of volumes on an instance
@@ -244,8 +259,12 @@
     @decorators.attr(type='slow')
     @decorators.idempotent_id('13a940b6-3474-4c3c-b03f-29b89112bfee')
     def test_attach_volume_shelved_or_offload_server(self):
-        # Create server, count number of volumes on it, shelve
-        # server and attach pre-created volume to shelved server
+        """Test attaching volume to shelved server
+
+        Create server, count number of volumes on it, shelve
+        server and attach pre-created volume to shelved server, then
+        unshelve the server and check that attached volume exists.
+        """
         server, validation_resources = self._create_server()
         volume = self.create_volume()
         num_vol = self._count_volumes(server, validation_resources)
@@ -271,8 +290,12 @@
     @decorators.attr(type='slow')
     @decorators.idempotent_id('b54e86dd-a070-49c4-9c07-59ae6dae15aa')
     def test_detach_volume_shelved_or_offload_server(self):
-        # Count number of volumes on instance, shelve
-        # server and attach pre-created volume to shelved server
+        """Test detaching volume from shelved server
+
+        Count number of volumes on server, shelve server and attach
+        pre-created volume to shelved server, then detach the volume, unshelve
+        the instance and check that we have the expected number of volume(s).
+        """
         server, validation_resources = self._create_server()
         volume = self.create_volume()
         num_vol = self._count_volumes(server, validation_resources)
@@ -291,6 +314,12 @@
 
 
 class AttachVolumeMultiAttachTest(BaseAttachVolumeTest):
+    """Test attaching one volume to multiple servers
+
+    Test attaching one volume to multiple servers with compute
+    microversion greater than 2.59.
+    """
+
     min_microversion = '2.60'
     max_microversion = 'latest'
 
@@ -367,6 +396,12 @@
 
     @decorators.idempotent_id('8d5853f7-56e7-4988-9b0c-48cea3c7049a')
     def test_list_get_volume_attachments_multiattach(self):
+        """Test listing and getting multiattached volume attachments
+
+        Attach a single volume to two servers, list attachments from the
+        volume and make sure the server uuids are in the list, then detach
+        the volume from servers one by one.
+        """
         # Attach a single volume to two servers.
         servers, volume, attachments = self._create_and_multiattach()
 
@@ -420,6 +455,8 @@
 
     @utils.services('image')
     @decorators.idempotent_id('885ac48a-2d7a-40c5-ae8b-1993882d724c')
+    @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
+                          'Snapshotting is not available.')
     def test_snapshot_volume_backed_multiattach(self):
         """Boots a server from a multiattach volume and snapshots the server.
 
@@ -448,7 +485,10 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_with_multiattached_volume(self):
-        # Attach a single volume to multiple servers, then resize the servers
+        """Test resizing servers with multiattached volume
+
+        Attach a single volume to multiple servers, then resize the servers
+        """
         servers, volume, _ = self._create_and_multiattach()
 
         for server in servers:
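
The guards added in the hunks above follow the two usual Tempest skip styles: a class-wide skip_checks() that raises skipException before any resources are created, and a per-test testtools.skipUnless() driven by a config flag. A minimal sketch of both patterns (illustrative only, not part of this patch; the class name, test name and idempotent id are placeholders, while the config options are the ones referenced above):

    import testtools

    from tempest.api.compute import base
    from tempest import config
    from tempest.lib import decorators

    CONF = config.CONF


    class SkipPatternSketch(base.BaseV2ComputeTest):
        """Illustrative only: shows both skip guards used above."""

        @classmethod
        def skip_checks(cls):
            super(SkipPatternSketch, cls).skip_checks()
            if CONF.compute.compute_volume_common_az:
                # Skips every test in the class before any resources are built.
                raise cls.skipException('Cross AZ attach not available.')

        @decorators.idempotent_id('00000000-0000-0000-0000-000000000000')
        @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                              'Snapshotting is not available.')
        def test_snapshot_feature_guard(self):
            """Skipped whenever the snapshot feature flag is disabled."""
            pass
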
diff --git a/tempest/api/compute/volumes/test_attach_volume_negative.py b/tempest/api/compute/volumes/test_attach_volume_negative.py
index 9a506af..516f599 100644
--- a/tempest/api/compute/volumes/test_attach_volume_negative.py
+++ b/tempest/api/compute/volumes/test_attach_volume_negative.py
@@ -21,6 +21,8 @@
 
 
 class AttachVolumeNegativeTest(base.BaseV2ComputeTest):
+    """Negative tests of volume attaching"""
+
     create_default_network = True
 
     @classmethod
@@ -34,6 +36,7 @@
     @decorators.related_bug('1630783', status_code=500)
     @decorators.idempotent_id('a313b5cd-fbd0-49cc-94de-870e99f763c7')
     def test_delete_attached_volume(self):
+        """Test deleting attachemd volume should fail"""
         server = self.create_test_server(wait_until='ACTIVE')
         volume = self.create_volume()
         self.attach_volume(server, volume)
@@ -44,10 +47,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('aab919e2-d992-4cbb-a4ed-745c2475398c')
     def test_attach_attached_volume_to_same_server(self):
-        # Test attaching the same volume to the same instance once
-        # it's already attached. The nova/cinder validation for this differs
-        # depending on whether or not cinder v3.27 is being used to attach
-        # the volume to the instance.
+        """Test attaching attached volume to same server should fail
+
+        Test attaching the same volume to the same instance once
+        it's already attached. The nova/cinder validation for this differs
+        depending on whether or not cinder v3.27 is being used to attach
+        the volume to the instance.
+        """
         server = self.create_test_server(wait_until='ACTIVE')
         volume = self.create_volume()
 
@@ -59,6 +65,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ee37a796-2afb-11e7-bc0f-fa163e65f5ce')
     def test_attach_attached_volume_to_different_server(self):
+        """Test attaching attached volume to different server should fail"""
         server1 = self.create_test_server(wait_until='ACTIVE')
         volume = self.create_volume()
 
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index f3ccf8d..30bea60 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -24,6 +24,7 @@
 
 
 class VolumesSnapshotsTestJSON(base.BaseV2ComputeTest):
+    """Test volume snapshots with compute microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -48,6 +49,7 @@
 
     @decorators.idempotent_id('cd4ec87d-7825-450d-8040-6e2068f2da8f')
     def test_volume_snapshot_create_get_list_delete(self):
+        """Test create/get/list/delete volume snapshot"""
         volume = self.create_volume()
         self.addCleanup(self.delete_volume, volume['id'])
 
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 0d23c1f..554f418 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -25,6 +25,7 @@
 
 
 class VolumesGetTestJSON(base.BaseV2ComputeTest):
+    """Test compute volumes API with microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -45,7 +46,7 @@
 
     @decorators.idempotent_id('f10f25eb-9775-4d9d-9cbe-1cf54dae9d5f')
     def test_volume_create_get_delete(self):
-        # CREATE, GET, DELETE Volume
+        """Test create/get/delete volume"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         # Create volume
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index 28bc174..0b37264 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -21,6 +21,8 @@
 
 
 class VolumesTestJSON(base.BaseV2ComputeTest):
+    """Test listing volumes with compute microversion less than 2.36"""
+
     # NOTE: This test creates a number of 1G volumes. To run successfully,
     # ensure that the backing file for the volume group that Nova uses
     # has space for at least 3 1G volumes!
@@ -57,7 +59,7 @@
 
     @decorators.idempotent_id('bc2dd1a0-15af-48e5-9990-f2e75a48325d')
     def test_volume_list(self):
-        # Should return the list of Volumes
+        """Test listing volumes should return all volumes"""
         # Fetch all Volumes
         fetched_list = self.client.list_volumes()['volumes']
         # Now check if all the Volumes created in setup are in fetched list
@@ -72,7 +74,7 @@
 
     @decorators.idempotent_id('bad0567a-5a4f-420b-851e-780b55bb867c')
     def test_volume_list_with_details(self):
-        # Should return the list of Volumes with details
+        """Test listing volumes with detail should return all volumes"""
         # Fetch all Volumes
         fetched_list = self.client.list_volumes(detail=True)['volumes']
         # Now check if all the Volumes created in setup are in fetched list
@@ -87,7 +89,11 @@
 
     @decorators.idempotent_id('1048ed81-2baf-487a-b284-c0622b86e7b8')
     def test_volume_list_param_limit(self):
-        # Return the list of volumes based on limit set
+        """Test listing volumes based on limit set
+
+        If we list volumes with limit=2, then only 2 volumes should be
+        returned.
+        """
         params = {'limit': 2}
         fetched_vol_list = self.client.list_volumes(**params)['volumes']
 
@@ -96,7 +102,11 @@
 
     @decorators.idempotent_id('33985568-4965-49d5-9bcc-0aa007ca5b7a')
     def test_volume_list_with_detail_param_limit(self):
-        # Return the list of volumes with details based on limit set.
+        """Test listing volumes with detail based on limit set
+
+        If we list volumes with detail=True and limit=2, then only 2
+        detailed volumes should be returned.
+        """
         params = {'limit': 2}
         fetched_vol_list = self.client.list_volumes(detail=True,
                                                     **params)['volumes']
@@ -106,7 +116,12 @@
 
     @decorators.idempotent_id('51c22651-a074-4ea7-af0b-094f9331303e')
     def test_volume_list_param_offset_and_limit(self):
-        # Return the list of volumes based on offset and limit set.
+        """Test listing volumes based on offset and limit set
+
+        If we list volumes with offset=1 and limit=1, then the single volume
+        at position 1 of the full volume list should be returned.
+        (Positions in the full volume list start from 0.)
+        """
         # get all volumes list
         all_vol_list = self.client.list_volumes()['volumes']
         params = {'offset': 1, 'limit': 1}
@@ -123,7 +138,13 @@
 
     @decorators.idempotent_id('06b6abc4-3f10-48e9-a7a1-3facc98f03e5')
     def test_volume_list_with_detail_param_offset_and_limit(self):
-        # Return the list of volumes details based on offset and limit set.
+        """Test listing volumes with detail based on offset and limit set
+
+        If we list volumes with detail=True, offset=1 and limit=1, then the
+        single detailed volume at position 1 of the full volume list should
+        be returned.
+        (Positions in the full volume list start from 0.)
+        """
         # get all volumes list
         all_vol_list = self.client.list_volumes(detail=True)['volumes']
         params = {'offset': 1, 'limit': 1}
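
The limit/offset docstrings above all describe the same indexing rule: offset selects a starting position in the full listing (counting from 0) and limit caps the page size. A hedged sketch of that check (`client` stands for the volumes client used in these tests; the helper name is made up):

    def check_offset_limit_semantics(client):
        # Full listing first; positions are counted from 0.
        all_vols = client.list_volumes(detail=True)['volumes']
        # offset=1, limit=1 should return exactly the item at position 1.
        page = client.list_volumes(detail=True, offset=1, limit=1)['volumes']
        assert len(page) == 1
        assert page[0]['id'] == all_vols[1]['id']
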
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index 444ce93..f553e32 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -23,6 +23,7 @@
 
 
 class VolumesNegativeTest(base.BaseV2ComputeTest):
+    """Negative tests of volumes with compute microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -44,7 +45,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c03ea686-905b-41a2-8748-9635154b7c57')
     def test_volume_get_nonexistent_volume_id(self):
-        # Negative: Should not be able to get details of nonexistent volume
+        """Test getting details of a non existent volume should fail"""
         # Creating a nonexistent volume id
         # Trying to GET a non existent volume
         self.assertRaises(lib_exc.NotFound, self.client.show_volume,
@@ -53,7 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('54a34226-d910-4b00-9ef8-8683e6c55846')
     def test_volume_delete_nonexistent_volume_id(self):
-        # Negative: Should not be able to delete nonexistent Volume
+        """Test deleting a nonexistent volume should fail"""
         # Creating nonexistent volume id
         # Trying to DELETE a non existent volume
         self.assertRaises(lib_exc.NotFound, self.client.delete_volume,
@@ -62,8 +63,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5125ae14-152b-40a7-b3c5-eae15e9022ef')
     def test_create_volume_with_invalid_size(self):
-        # Negative: Should not be able to create volume with invalid size
-        # in request
+        """Test creating volume with invalid size should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -72,8 +72,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('131cb3a1-75cc-4d40-b4c3-1317f64719b0')
     def test_create_volume_without_passing_size(self):
-        # Negative: Should not be able to create volume without passing size
-        # in request
+        """Test creating volume without specifying size should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -82,7 +81,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8cce995e-0a83-479a-b94d-e1e40b8a09d1')
     def test_create_volume_with_size_zero(self):
-        # Negative: Should not be able to create volume with size zero
+        """Test creating volume with size=0 should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -91,14 +90,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('62bab09a-4c03-4617-8cca-8572bc94af9b')
     def test_get_volume_without_passing_volume_id(self):
-        # Negative: Should not be able to get volume when empty ID is passed
+        """Test getting volume details without volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.show_volume, '')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('62972737-124b-4513-b6cf-2f019f178494')
     def test_delete_invalid_volume_id(self):
-        # Negative: Should not be able to delete volume when invalid ID is
-        # passed
+        """Test deleting volume with an invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_volume,
                           data_utils.rand_name('invalid'))
@@ -106,5 +104,5 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0d1417c5-4ae8-4c2c-adc5-5f0b864253e5')
     def test_delete_volume_without_passing_volume_id(self):
-        # Negative: Should not be able to delete volume when empty ID is passed
+        """Test deleting volume without volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_volume, '')
diff --git a/tempest/api/identity/admin/v2/test_endpoints.py b/tempest/api/identity/admin/v2/test_endpoints.py
index 947706e..236ce7c 100644
--- a/tempest/api/identity/admin/v2/test_endpoints.py
+++ b/tempest/api/identity/admin/v2/test_endpoints.py
@@ -19,6 +19,7 @@
 
 
 class EndPointsTestJSON(base.BaseIdentityV2AdminTest):
+    """Test keystone v2 endpoints"""
 
     @classmethod
     def resource_setup(cls):
@@ -51,6 +52,7 @@
 
     @decorators.idempotent_id('11f590eb-59d8-4067-8b2b-980c7f387f51')
     def test_list_endpoints(self):
+        """Test listing keystone endpoints"""
         # Get a list of endpoints
         fetched_endpoints = self.endpoints_client.list_endpoints()['endpoints']
         # Asserting LIST endpoints
@@ -62,6 +64,7 @@
 
     @decorators.idempotent_id('9974530a-aa28-4362-8403-f06db02b26c1')
     def test_create_list_delete_endpoint(self):
+        """Test creating, listing and deleting a keystone endpoint"""
         region = data_utils.rand_name('region')
         url = data_utils.rand_url()
         endpoint = self.endpoints_client.create_endpoint(
diff --git a/tempest/api/identity/admin/v2/test_roles_negative.py b/tempest/api/identity/admin/v2/test_roles_negative.py
index f3b7494..3c71ba9 100644
--- a/tempest/api/identity/admin/v2/test_roles_negative.py
+++ b/tempest/api/identity/admin/v2/test_roles_negative.py
@@ -20,6 +20,7 @@
 
 
 class RolesNegativeTestJSON(base.BaseIdentityV2AdminTest):
+    """Negative tests of keystone roles via v2 API"""
 
     def _get_role_params(self):
         user = self.setup_test_user()
@@ -30,14 +31,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d5d5f1df-f8ca-4de0-b2ef-259c1cc67025')
     def test_list_roles_by_unauthorized_user(self):
-        # Non-administrator user should not be able to list roles
+        """Test Non-admin user should not be able to list roles via v2 API"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_roles_client.list_roles)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('11a3c7da-df6c-40c2-abc2-badd682edf9f')
     def test_list_roles_request_without_token(self):
-        # Request to list roles without a valid token should fail
+        """Test listing roles without a valid token via v2 API should fail"""
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
         self.assertRaises(lib_exc.Unauthorized, self.roles_client.list_roles)
@@ -46,14 +47,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c0b89e56-accc-4c73-85f8-9c0f866104c1')
     def test_role_create_blank_name(self):
-        # Should not be able to create a role with a blank name
+        """Test creating a role with a blank name via v2 API is not allowed"""
         self.assertRaises(lib_exc.BadRequest, self.roles_client.create_role,
                           name='')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('585c8998-a8a4-4641-a5dd-abef7a8ced00')
     def test_create_role_by_unauthorized_user(self):
-        # Non-administrator user should not be able to create role
+        """Test non-admin user should not be able to create role via v2 API"""
         role_name = data_utils.rand_name(name='role')
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_roles_client.create_role,
@@ -62,7 +63,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a7edd17a-e34a-4aab-8bb7-fa6f498645b8')
     def test_create_role_request_without_token(self):
-        # Request to create role without a valid token should fail
+        """Test creating role without a valid token via v2 API should fail"""
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
         role_name = data_utils.rand_name(name='role')
@@ -73,7 +74,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c0cde2c8-81c1-4bb0-8fe2-cf615a3547a8')
     def test_role_create_duplicate(self):
-        # Role names should be unique
+        """Test role names should be unique via v2 API"""
         role_name = data_utils.rand_name(name='role-dup')
         body = self.roles_client.create_role(name=role_name)['role']
         role1_id = body.get('id')
@@ -84,7 +85,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('15347635-b5b1-4a87-a280-deb2bd6d865e')
     def test_delete_role_by_unauthorized_user(self):
-        # Non-administrator user should not be able to delete role
+        """Test non-admin user should not be able to delete role via v2 API"""
         role_name = data_utils.rand_name(name='role')
         body = self.roles_client.create_role(name=role_name)['role']
         self.addCleanup(self.roles_client.delete_role, body['id'])
@@ -95,7 +96,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('44b60b20-70de-4dac-beaf-a3fc2650a16b')
     def test_delete_role_request_without_token(self):
-        # Request to delete role without a valid token should fail
+        """Test deleting role without a valid token via v2 API should fail"""
         role_name = data_utils.rand_name(name='role')
         body = self.roles_client.create_role(name=role_name)['role']
         self.addCleanup(self.roles_client.delete_role, body['id'])
@@ -110,7 +111,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('38373691-8551-453a-b074-4260ad8298ef')
     def test_delete_role_non_existent(self):
-        # Attempt to delete a non existent role should fail
+        """Test deleting a non existent role via v2 API should fail"""
         non_existent_role = data_utils.rand_uuid_hex()
         self.assertRaises(lib_exc.NotFound, self.roles_client.delete_role,
                           non_existent_role)
@@ -118,8 +119,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('391df5cf-3ec3-46c9-bbe5-5cb58dd4dc41')
     def test_assign_user_role_by_unauthorized_user(self):
-        # Non-administrator user should not be authorized to
-        # assign a role to user
+        """Test non-admin user assigning a role to user via v2 API
+
+        Non-admin user should not be authorized to assign a role to user via
+        v2 API.
+        """
         (user, tenant, role) = self._get_role_params()
         self.assertRaises(
             lib_exc.Forbidden,
@@ -129,7 +133,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f0d2683c-5603-4aee-95d7-21420e87cfd8')
     def test_assign_user_role_request_without_token(self):
-        # Request to assign a role to a user without a valid token
+        """Test assigning a role to a user without a valid token via v2 API
+
+        Assigning a role to a user without a valid token via v2 API should
+        fail.
+        """
         (user, tenant, role) = self._get_role_params()
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
@@ -142,7 +150,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('99b297f6-2b5d-47c7-97a9-8b6bb4f91042')
     def test_assign_user_role_for_non_existent_role(self):
-        # Attempt to assign a non existent role to user should fail
+        """Test assigning a non existent role to user via v2 API
+
+        Assigning a non existent role to user via v2 API should fail.
+        """
         (user, tenant, _) = self._get_role_params()
         non_existent_role = data_utils.rand_uuid_hex()
         self.assertRaises(lib_exc.NotFound,
@@ -152,7 +163,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b2285aaa-9e76-4704-93a9-7a8acd0a6c8f')
     def test_assign_user_role_for_non_existent_tenant(self):
-        # Attempt to assign a role on a non existent tenant should fail
+        """Test assigning a role on a non existent tenant via v2 API
+
+        Assigning a role on a non existent tenant via v2 API should fail.
+        """
         (user, _, role) = self._get_role_params()
         non_existent_tenant = data_utils.rand_uuid_hex()
         self.assertRaises(lib_exc.NotFound,
@@ -162,7 +176,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5c3132cd-c4c8-4402-b5ea-71eb44e97793')
     def test_assign_duplicate_user_role(self):
-        # Duplicate user role should not get assigned
+        """Test duplicate user role should not get assigned via v2 API"""
         (user, tenant, role) = self._get_role_params()
         self.roles_client.create_user_role_on_project(tenant['id'],
                                                       user['id'],
@@ -174,8 +188,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d0537987-0977-448f-a435-904c15de7298')
     def test_remove_user_role_by_unauthorized_user(self):
-        # Non-administrator user should not be authorized to
-        # remove a user's role
+        """Test non-admin user removing a user's role via v2 API
+
+        Non-admin user should not be authorized to remove a user's role via
+        v2 API
+        """
         (user, tenant, role) = self._get_role_params()
         self.roles_client.create_user_role_on_project(tenant['id'],
                                                       user['id'],
@@ -188,7 +205,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('cac81cf4-c1d2-47dc-90d3-f2b7eb572286')
     def test_remove_user_role_request_without_token(self):
-        # Request to remove a user's role without a valid token
+        """Test removing a user's role without a valid token via v2 API
+
+        Removing a user's role without a valid token via v2 API should fail.
+        """
         (user, tenant, role) = self._get_role_params()
         self.roles_client.create_user_role_on_project(tenant['id'],
                                                       user['id'],
@@ -203,7 +223,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ab32d759-cd16-41f1-a86e-44405fa9f6d2')
     def test_remove_user_role_non_existent_role(self):
-        # Attempt to delete a non existent role from a user should fail
+        """Test deleting a non existent role from a user via v2 API
+
+        Deleting a non existent role from a user via v2 API should fail.
+        """
         (user, tenant, role) = self._get_role_params()
         self.roles_client.create_user_role_on_project(tenant['id'],
                                                       user['id'],
@@ -216,7 +239,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('67a679ec-03dd-4551-bbfc-d1c93284f023')
     def test_remove_user_role_non_existent_tenant(self):
-        # Attempt to remove a role from a non existent tenant should fail
+        """Test removing a role from a non existent tenant via v2 API
+
+        Removing a role from a non existent tenant via v2 API should fail.
+        """
         (user, tenant, role) = self._get_role_params()
         self.roles_client.create_user_role_on_project(tenant['id'],
                                                       user['id'],
@@ -229,8 +255,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7391ab4c-06f3-477a-a64a-c8e55ce89837')
     def test_list_user_roles_by_unauthorized_user(self):
-        # Non-administrator user should not be authorized to list
-        # a user's roles
+        """Test non-admin user listing a user's roles via v2 API
+
+        Non-admin user should not be authorized to list a user's roles via v2
+        API.
+        """
         (user, tenant, role) = self._get_role_params()
         self.roles_client.create_user_role_on_project(tenant['id'],
                                                       user['id'],
@@ -243,7 +272,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('682adfb2-fd5f-4b0a-a9ca-322e9bebb907')
     def test_list_user_roles_request_without_token(self):
-        # Request to list user's roles without a valid token should fail
+        """Test listing user's roles without a valid token via v2 API
+
+        Listing user's roles without a valid token via v2 API should fail
+        """
         (user, tenant, _) = self._get_role_params()
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
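
Every "request without a valid token" case in this file reuses one pattern: fetch the token currently cached by the auth provider, revoke it, then expect the next call to be rejected as Unauthorized. A minimal sketch of that pattern (illustrative only, not part of this patch; the class name, test name and idempotent id are placeholders, while the client attributes are the ones used above):

    from tempest.api.identity import base
    from tempest.lib import decorators
    from tempest.lib import exceptions as lib_exc


    class TokenRevocationSketch(base.BaseIdentityV2AdminTest):
        """Illustrative only: the shared 'no valid token' negative pattern."""

        @decorators.attr(type=['negative'])
        @decorators.idempotent_id('00000000-0000-0000-0000-000000000001')
        def test_call_rejected_after_token_is_deleted(self):
            # Revoke the token the admin client is currently using ...
            token = self.client.auth_provider.get_token()
            self.client.delete_token(token)
            # ... so the next request must come back as 401 Unauthorized.
            self.assertRaises(lib_exc.Unauthorized,
                              self.roles_client.list_roles)
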
diff --git a/tempest/api/identity/admin/v2/test_services.py b/tempest/api/identity/admin/v2/test_services.py
index 03543ac..182b24c 100644
--- a/tempest/api/identity/admin/v2/test_services.py
+++ b/tempest/api/identity/admin/v2/test_services.py
@@ -20,6 +20,7 @@
 
 
 class ServicesTestJSON(base.BaseIdentityV2AdminTest):
+    """Test identity services via v2 API"""
 
     def _del_service(self, service_id):
         # Deleting the service created in this method
@@ -30,6 +31,7 @@
 
     @decorators.idempotent_id('84521085-c6e6-491c-9a08-ec9f70f90110')
     def test_create_get_delete_service(self):
+        """Test verifies the identity service create/get/delete via v2 API"""
         # GET Service
         # Creating a Service
         name = data_utils.rand_name('service')
@@ -64,7 +66,10 @@
 
     @decorators.idempotent_id('5d3252c8-e555-494b-a6c8-e11d7335da42')
     def test_create_service_without_description(self):
-        # Create a service only with name and type
+        """Test creating identity service without description via v2 API
+
+        Create a service only with name and type.
+        """
         name = data_utils.rand_name('service')
         s_type = data_utils.rand_name('type')
         service = self.services_client.create_service(
@@ -79,7 +84,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('34ea6489-012d-4a86-9038-1287cadd5eca')
     def test_list_services(self):
-        # Create, List, Verify and Delete Services
+        """Test Create/List/Verify/Delete of identity service via v2 API"""
         services = []
         for _ in range(3):
             name = data_utils.rand_name('service')
diff --git a/tempest/api/identity/admin/v2/test_tenant_negative.py b/tempest/api/identity/admin/v2/test_tenant_negative.py
index 49bb949..792dad9 100644
--- a/tempest/api/identity/admin/v2/test_tenant_negative.py
+++ b/tempest/api/identity/admin/v2/test_tenant_negative.py
@@ -20,18 +20,22 @@
 
 
 class TenantsNegativeTestJSON(base.BaseIdentityV2AdminTest):
+    """Negative tests of keystone tenants via v2 API"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ca9bb202-63dd-4240-8a07-8ef9c19c04bb')
     def test_list_tenants_by_unauthorized_user(self):
-        # Non-administrator user should not be able to list tenants
+        """Test Non-admin should not be able to list tenants via v2 API"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_tenants_client.list_tenants)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('df33926c-1c96-4d8d-a762-79cc6b0c3cf4')
     def test_list_tenant_request_without_token(self):
-        # Request to list tenants without a valid token should fail
+        """Test listing tenants without a valid token via v2 API
+
+        Listing tenants without a valid token via v2 API should fail.
+        """
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
         self.assertRaises(lib_exc.Unauthorized,
@@ -41,7 +45,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('162ba316-f18b-4987-8c0c-fd9140cd63ed')
     def test_tenant_delete_by_unauthorized_user(self):
-        # Non-administrator user should not be able to delete a tenant
+        """Test non-admin should not be able to delete a tenant via v2 API"""
         tenant = self.setup_test_tenant()
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_tenants_client.delete_tenant,
@@ -50,7 +54,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e450db62-2e9d-418f-893a-54772d6386b1')
     def test_tenant_delete_request_without_token(self):
-        # Request to delete a tenant without a valid token should fail
+        """Test deleting a tenant without a valid token via v2 API
+
+        Deleting a tenant without a valid token via v2 API should fail.
+        """
         tenant = self.setup_test_tenant()
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
@@ -62,14 +69,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9c9a2aed-6e3c-467a-8f5c-89da9d1b516b')
     def test_delete_non_existent_tenant(self):
-        # Attempt to delete a non existent tenant should fail
+        """Test deleting a non existent tenant via v2 API should fail"""
         self.assertRaises(lib_exc.NotFound, self.tenants_client.delete_tenant,
                           data_utils.rand_uuid_hex())
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('af16f44b-a849-46cb-9f13-a751c388f739')
     def test_tenant_create_duplicate(self):
-        # Tenant names should be unique
+        """Test tenant names should be unique via v2 API"""
         tenant_name = data_utils.rand_name(name='tenant')
         self.setup_test_tenant(name=tenant_name)
         self.assertRaises(lib_exc.Conflict, self.tenants_client.create_tenant,
@@ -78,7 +85,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d26b278a-6389-4702-8d6e-5980d80137e0')
     def test_create_tenant_by_unauthorized_user(self):
-        # Non-administrator user should not be authorized to create a tenant
+        """Test non-admin user creating a tenant via v2 API
+
+        Non-admin user should not be authorized to create a tenant via v2 API.
+        """
         tenant_name = data_utils.rand_name(name='tenant')
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_tenants_client.create_tenant,
@@ -87,7 +97,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a3ee9d7e-6920-4dd5-9321-d4b2b7f0a638')
     def test_create_tenant_request_without_token(self):
-        # Create tenant request without a token should not be authorized
+        """Test creating tenant without a token via v2 API is not allowed"""
         tenant_name = data_utils.rand_name(name='tenant')
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
@@ -99,7 +109,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5a2e4ca9-b0c0-486c-9c48-64a94fba2395')
     def test_create_tenant_with_empty_name(self):
-        # Tenant name should not be empty
+        """Test tenant name should not be empty via v2 API"""
         self.assertRaises(lib_exc.BadRequest,
                           self.tenants_client.create_tenant,
                           name='')
@@ -107,7 +117,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('2ff18d1e-dfe3-4359-9dc3-abf582c196b9')
     def test_create_tenants_name_length_over_64(self):
-        # Tenant name length should not be greater than 64 characters
+        """Test tenant name length should not exceed 64 via v2 API"""
         tenant_name = 'a' * 65
         self.assertRaises(lib_exc.BadRequest,
                           self.tenants_client.create_tenant,
@@ -116,14 +126,17 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('bd20dc2a-9557-4db7-b755-f48d952ad706')
     def test_update_non_existent_tenant(self):
-        # Attempt to update a non existent tenant should fail
+        """Test updating a non existent tenant via v2 API should fail"""
         self.assertRaises(lib_exc.NotFound, self.tenants_client.update_tenant,
                           data_utils.rand_uuid_hex())
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('41704dc5-c5f7-4f79-abfa-76e6fedc570b')
     def test_tenant_update_by_unauthorized_user(self):
-        # Non-administrator user should not be able to update a tenant
+        """Test non-admin user updating a tenant via v2 API
+
+        Non-admin user should not be able to update a tenant via v2 API
+        """
         tenant = self.setup_test_tenant()
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_tenants_client.update_tenant,
@@ -132,7 +145,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7a421573-72c7-4c22-a98e-ce539219c657')
     def test_tenant_update_request_without_token(self):
-        # Request to update a tenant without a valid token should fail
+        """Test updating a tenant without a valid token via v2 API
+
+        Updating a tenant without a valid token via v2 API should fail
+        """
         tenant = self.setup_test_tenant()
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
diff --git a/tempest/api/identity/admin/v2/test_tenants.py b/tempest/api/identity/admin/v2/test_tenants.py
index f68754e..5f73e1c 100644
--- a/tempest/api/identity/admin/v2/test_tenants.py
+++ b/tempest/api/identity/admin/v2/test_tenants.py
@@ -19,10 +19,14 @@
 
 
 class TenantsTestJSON(base.BaseIdentityV2AdminTest):
+    """Test identity tenants via v2 API"""
 
     @decorators.idempotent_id('16c6e05c-6112-4b0e-b83f-5e43f221b6b0')
     def test_tenant_list_delete(self):
-        # Create several tenants and delete them
+        """Test listing and deleting tenants via v2 API
+
+        Create several tenants and delete them
+        """
         tenants = []
         for _ in range(3):
             tenant = self.setup_test_tenant()
@@ -41,7 +45,7 @@
 
     @decorators.idempotent_id('d25e9f24-1310-4d29-b61b-d91299c21d6d')
     def test_tenant_create_with_description(self):
-        # Create tenant with a description
+        """Test creating tenant with a description via v2 API"""
         tenant_desc = data_utils.rand_name(name='desc')
         tenant = self.setup_test_tenant(description=tenant_desc)
         tenant_id = tenant['id']
@@ -56,7 +60,7 @@
 
     @decorators.idempotent_id('670bdddc-1cd7-41c7-b8e2-751cfb67df50')
     def test_tenant_create_enabled(self):
-        # Create a tenant that is enabled
+        """Test creating a tenant that is enabled via v2 API"""
         tenant = self.setup_test_tenant(enabled=True)
         tenant_id = tenant['id']
         self.assertTrue(tenant['enabled'], 'Enable should be True in response')
@@ -66,7 +70,7 @@
 
     @decorators.idempotent_id('3be22093-b30f-499d-b772-38340e5e16fb')
     def test_tenant_create_not_enabled(self):
-        # Create a tenant that is not enabled
+        """Test creating a tenant that is not enabled via v2 API"""
         tenant = self.setup_test_tenant(enabled=False)
         tenant_id = tenant['id']
         self.assertFalse(tenant['enabled'],
@@ -78,7 +82,7 @@
 
     @decorators.idempotent_id('781f2266-d128-47f3-8bdb-f70970add238')
     def test_tenant_update_name(self):
-        # Update name attribute of a tenant
+        """Test updating name attribute of a tenant via v2 API"""
         t_name1 = data_utils.rand_name(name='tenant')
         tenant = self.setup_test_tenant(name=t_name1)
         t_id = tenant['id']
@@ -100,7 +104,7 @@
 
     @decorators.idempotent_id('859fcfe1-3a03-41ef-86f9-b19a47d1cd87')
     def test_tenant_update_desc(self):
-        # Update description attribute of a tenant
+        """Test updating description attribute of a tenant via v2 API"""
         t_desc = data_utils.rand_name(name='desc')
         tenant = self.setup_test_tenant(description=t_desc)
         t_id = tenant['id']
@@ -123,7 +127,7 @@
 
     @decorators.idempotent_id('8fc8981f-f12d-4c66-9972-2bdcf2bc2e1a')
     def test_tenant_update_enable(self):
-        # Update the enabled attribute of a tenant
+        """Test updating the enabled attribute of a tenant via v2 API"""
         t_en = False
         tenant = self.setup_test_tenant(enabled=t_en)
         t_id = tenant['id']
diff --git a/tempest/api/identity/admin/v2/test_tokens.py b/tempest/api/identity/admin/v2/test_tokens.py
index 6ce1a8b..5d89f9d 100644
--- a/tempest/api/identity/admin/v2/test_tokens.py
+++ b/tempest/api/identity/admin/v2/test_tokens.py
@@ -23,9 +23,11 @@
 
 
 class TokensTestJSON(base.BaseIdentityV2AdminTest):
+    """Test keystone tokens via v2 API"""
 
     @decorators.idempotent_id('453ad4d5-e486-4b2f-be72-cffc8149e586')
     def test_create_check_get_delete_token(self):
+        """Test getting create/check/get/delete token for user via v2 API"""
         # get a token by username and password
         user_name = data_utils.rand_name(name='user')
         user_password = data_utils.rand_password()
@@ -59,7 +61,7 @@
 
     @decorators.idempotent_id('25ba82ee-8a32-4ceb-8f50-8b8c71e8765e')
     def test_rescope_token(self):
-        """An unscoped token can be requested
+        """Test an unscoped token can be requested via v2 API
 
         That token can be used to request a scoped token.
         """
@@ -112,6 +114,7 @@
 
     @decorators.idempotent_id('ca3ea6f7-ed08-4a61-adbd-96906456ad31')
     def test_list_endpoints_for_token(self):
+        """Test listing endpoints for token via v2 API"""
         tempest_services = ['keystone', 'nova', 'neutron', 'swift', 'cinder',
                             'neutron']
         # get a token for the user
diff --git a/tempest/api/identity/admin/v2/test_tokens_negative.py b/tempest/api/identity/admin/v2/test_tokens_negative.py
index eb3e365..f2e41ff 100644
--- a/tempest/api/identity/admin/v2/test_tokens_negative.py
+++ b/tempest/api/identity/admin/v2/test_tokens_negative.py
@@ -19,12 +19,17 @@
 
 
 class TokensAdminTestNegative(base.BaseIdentityV2AdminTest):
+    """Negative tests of keystone tokens via v2 API"""
 
     credentials = ['primary', 'admin', 'alt']
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a0a0a600-4292-4364-99c5-922c834fdf05')
     def test_check_token_existence_negative(self):
+        """Test checking other tenant's token existence via v2 API
+
+        Checking other tenant's token existence via v2 API should fail.
+        """
         creds = self.os_primary.credentials
         creds_alt = self.os_alt.credentials
         username = creds.username
diff --git a/tempest/api/identity/admin/v2/test_users.py b/tempest/api/identity/admin/v2/test_users.py
index 0d98af5..57a321a 100644
--- a/tempest/api/identity/admin/v2/test_users.py
+++ b/tempest/api/identity/admin/v2/test_users.py
@@ -23,6 +23,7 @@
 
 
 class UsersTestJSON(base.BaseIdentityV2AdminTest):
+    """Test keystone users via v2 API"""
 
     @classmethod
     def resource_setup(cls):
@@ -33,14 +34,14 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('2d55a71e-da1d-4b43-9c03-d269fd93d905')
     def test_create_user(self):
-        # Create a user
+        """Test creating a user via v2 API"""
         tenant = self.setup_test_tenant()
         user = self.create_test_user(name=self.alt_user, tenantId=tenant['id'])
         self.assertEqual(self.alt_user, user['name'])
 
     @decorators.idempotent_id('89d9fdb8-15c2-4304-a429-48715d0af33d')
     def test_create_user_with_enabled(self):
-        # Create a user with enabled : False
+        """Test creating a user with enabled : False via v2 API"""
         tenant = self.setup_test_tenant()
         name = data_utils.rand_name('test_user')
         user = self.create_test_user(name=name,
@@ -53,7 +54,7 @@
 
     @decorators.idempotent_id('39d05857-e8a5-4ed4-ba83-0b52d3ab97ee')
     def test_update_user(self):
-        # Test case to check if updating of user attributes is successful.
+        """Test updating user attributes via v2 API"""
         tenant = self.setup_test_tenant()
         user = self.create_test_user(tenantId=tenant['id'])
 
@@ -75,14 +76,14 @@
 
     @decorators.idempotent_id('29ed26f4-a74e-4425-9a85-fdb49fa269d2')
     def test_delete_user(self):
-        # Delete a user
+        """Test deleting a user via v2 API"""
         tenant = self.setup_test_tenant()
         user = self.create_test_user(tenantId=tenant['id'])
         self.users_client.delete_user(user['id'])
 
     @decorators.idempotent_id('aca696c3-d645-4f45-b728-63646045beb1')
     def test_user_authentication(self):
-        # Valid user's token is authenticated
+        """Test that valid user's token is authenticated via v2 API"""
         password = data_utils.rand_password()
         user = self.setup_test_user(password)
         tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
@@ -97,6 +98,7 @@
 
     @decorators.idempotent_id('5d1fa498-4c2d-4732-a8fe-2b054598cfdd')
     def test_authentication_request_without_token(self):
+        """Test authentication request without token via v2 API"""
         # Request for token authentication with a valid token in header
         password = data_utils.rand_password()
         user = self.setup_test_user(password)
@@ -116,7 +118,10 @@
 
     @decorators.idempotent_id('a149c02e-e5e0-4b89-809e-7e8faf33ccda')
     def test_get_users(self):
-        # Get a list of users and find the test user
+        """Test getting users via v2 API
+
+        Get a list of users and find the test user
+        """
         user = self.setup_test_user()
         users = self.users_client.list_users()['users']
         self.assertThat([u['name'] for u in users],
@@ -125,7 +130,7 @@
 
     @decorators.idempotent_id('6e317209-383a-4bed-9f10-075b7c82c79a')
     def test_list_users_for_tenant(self):
-        # Return a list of all users for a tenant
+        """Test returning a list of all users for a tenant via v2 API"""
         tenant = self.setup_test_tenant()
         user_ids = list()
         fetched_user_ids = list()
@@ -147,7 +152,7 @@
 
     @decorators.idempotent_id('a8b54974-40e1-41c0-b812-50fc90827971')
     def test_list_users_with_roles_for_tenant(self):
-        # Return list of users on tenant when roles are assigned to users
+        """Test listing users on tenant with roles assigned via v2 API"""
         user = self.setup_test_user()
         tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
         role = self.setup_test_role()
@@ -175,7 +180,7 @@
 
     @decorators.idempotent_id('1aeb25ac-6ec5-4d8b-97cb-7ac3567a989f')
     def test_update_user_password(self):
-        # Test case to check if updating of user password is successful.
+        """Test updating of user password via v2 API"""
         user = self.setup_test_user()
         tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
         # Updating the user with new password
diff --git a/tempest/api/identity/admin/v2/test_users_negative.py b/tempest/api/identity/admin/v2/test_users_negative.py
index 4f47e41..eda1fdd 100644
--- a/tempest/api/identity/admin/v2/test_users_negative.py
+++ b/tempest/api/identity/admin/v2/test_users_negative.py
@@ -20,6 +20,7 @@
 
 
 class UsersNegativeTestJSON(base.BaseIdentityV2AdminTest):
+    """Negative tests of identity users via v2 API"""
 
     @classmethod
     def resource_setup(cls):
@@ -31,7 +32,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('60a1f5fa-5744-4cdf-82bf-60b7de2d29a4')
     def test_create_user_by_unauthorized_user(self):
-        # Non-administrator should not be authorized to create a user
+        """Non-admin should not be authorized to create a user via v2 API"""
         tenant = self.setup_test_tenant()
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_users_client.create_user,
@@ -42,7 +43,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d80d0c2f-4514-4d1e-806d-0930dfc5a187')
     def test_create_user_with_empty_name(self):
-        # User with an empty name should not be created
+        """User with an empty name should not be created via v2 API"""
         tenant = self.setup_test_tenant()
         self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
                           name='', password=self.alt_password,
@@ -52,7 +53,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7704b4f3-3b75-4b82-87cc-931d41c8f780')
     def test_create_user_with_name_length_over_255(self):
-        # Length of user name filed should be restricted to 255 characters
+        """Length of user name should not exceed 255 via v2 API"""
         tenant = self.setup_test_tenant()
         self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
                           name='a' * 256, password=self.alt_password,
@@ -62,7 +63,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('57ae8558-120c-4723-9308-3751474e7ecf')
     def test_create_user_with_duplicate_name(self):
-        # Duplicate user should not be created
+        """Duplicate user should not be created via v2 API"""
         password = data_utils.rand_password()
         user = self.setup_test_user(password)
         tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
@@ -75,7 +76,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0132cc22-7c4f-42e1-9e50-ac6aad31d59a')
     def test_create_user_for_non_existent_tenant(self):
-        # Attempt to create a user in a non-existent tenant should fail
+        """Creating a user in a non-existent tenant via v2 API should fail"""
         self.assertRaises(lib_exc.NotFound, self.users_client.create_user,
                           name=self.alt_user,
                           password=self.alt_password,
@@ -85,7 +86,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('55bbb103-d1ae-437b-989b-bcdf8175c1f4')
     def test_create_user_request_without_a_token(self):
-        # Request to create a user without a valid token should fail
+        """Creating a user without a valid token via v2 API should fail"""
         tenant = self.setup_test_tenant()
         # Get the token of the current client
         token = self.client.auth_provider.get_token()
@@ -103,7 +104,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('23a2f3da-4a1a-41da-abdd-632328a861ad')
     def test_create_user_with_enabled_non_bool(self):
-        # Attempt to create a user with valid enabled para should fail
+        """Creating a user with invalid enabled para via v2 API should fail"""
         tenant = self.setup_test_tenant()
         name = data_utils.rand_name('test_user')
         self.assertRaises(lib_exc.BadRequest, self.users_client.create_user,
@@ -114,7 +115,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3d07e294-27a0-4144-b780-a2a1bf6fee19')
     def test_update_user_for_non_existent_user(self):
-        # Attempt to update a user non-existent user should fail
+        """Updating a non-existent user via v2 API should fail"""
         user_name = data_utils.rand_name('user')
         non_existent_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.users_client.update_user,
@@ -123,7 +124,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3cc2a64b-83aa-4b02-88f0-d6ab737c4466')
     def test_update_user_request_without_a_token(self):
-        # Request to update a user without a valid token should fail
+        """Updating a user without a valid token via v2 API should fail"""
 
         # Get the token of the current client
         token = self.client.auth_provider.get_token()
@@ -139,7 +140,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('424868d5-18a7-43e1-8903-a64f95ee3aac')
     def test_update_user_by_unauthorized_user(self):
-        # Non-administrator should not be authorized to update user
+        """Non-admin should not be authorized to update user via v2 API"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_users_client.update_user,
                           self.alt_user)
@@ -147,7 +148,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d45195d5-33ed-41b9-a452-7d0d6a00f6e9')
     def test_delete_users_by_unauthorized_user(self):
-        # Non-administrator user should not be authorized to delete a user
+        """Non-admin should not be authorized to delete a user via v2 API"""
         user = self.setup_test_user()
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_users_client.delete_user,
@@ -156,14 +157,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7cc82f7e-9998-4f89-abae-23df36495867')
     def test_delete_non_existent_user(self):
-        # Attempt to delete a non-existent user should fail
+        """Attempt to delete a non-existent user via v2 API should fail"""
         self.assertRaises(lib_exc.NotFound, self.users_client.delete_user,
                           'junk12345123')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('57fe1df8-0aa7-46c0-ae9f-c2e785c7504a')
     def test_delete_user_request_without_a_token(self):
-        # Request to delete a user without a valid token should fail
+        """Deleting a user without a valid token via v2 API should fail"""
 
         # Get the token of the current client
         token = self.client.auth_provider.get_token()
@@ -179,7 +180,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('593a4981-f6d4-460a-99a1-57a78bf20829')
     def test_authentication_for_disabled_user(self):
-        # Disabled user's token should not get authenticated
+        """Disabled user's token should not get authenticated via v2 API"""
         password = data_utils.rand_password()
         user = self.setup_test_user(password)
         tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
@@ -192,7 +193,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('440a7a8d-9328-4b7b-83e0-d717010495e4')
     def test_authentication_when_tenant_is_disabled(self):
-        # User's token for a disabled tenant should not be authenticated
+        """Test User's token for a disabled tenant via v2 API
+
+        User's token for a disabled tenant should not be authenticated via
+        v2 API.
+        """
         password = data_utils.rand_password()
         user = self.setup_test_user(password)
         tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
@@ -205,7 +210,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('921f1ad6-7907-40b8-853f-637e7ee52178')
     def test_authentication_with_invalid_tenant(self):
-        # User's token for an invalid tenant should not be authenticated
+        """Test User's token for an invalid tenant via v2 API
+
+        User's token for an invalid tenant should not be authenticated via V2
+        API.
+        """
         password = data_utils.rand_password()
         user = self.setup_test_user(password)
         self.assertRaises(lib_exc.Unauthorized, self.token_client.auth,
@@ -216,7 +225,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('bde9aecd-3b1c-4079-858f-beb5deaa5b5e')
     def test_authentication_with_invalid_username(self):
-        # Non-existent user's token should not get authenticated
+        """Non-existent user's token should not get authorized via v2 API"""
         password = data_utils.rand_password()
         user = self.setup_test_user(password)
         tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
@@ -226,7 +235,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d5308b33-3574-43c3-8d87-1c090c5e1eca')
     def test_authentication_with_invalid_password(self):
-        # User's token with invalid password should not be authenticated
+        """Test User's token with invalid password via v2 API
+
+        User's token with invalid password should not be authenticated via V2
+        API.
+        """
         user = self.setup_test_user()
         tenant = self.tenants_client.show_tenant(user['tenantId'])['tenant']
         self.assertRaises(lib_exc.Unauthorized, self.token_client.auth,
@@ -235,14 +248,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('284192ce-fb7c-4909-a63b-9a502e0ddd11')
     def test_get_users_by_unauthorized_user(self):
-        # Non-administrator user should not be authorized to get user list
+        """Non-admin should not be authorized to get user list via v2 API"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_users_client.list_users)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a73591ec-1903-4ffe-be42-282b39fefc9d')
     def test_get_users_request_without_token(self):
-        # Request to get list of users without a valid token should fail
+        """Listing users without a valid token via v2 API should fail"""
         token = self.client.auth_provider.get_token()
         self.client.delete_token(token)
 
@@ -254,8 +267,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f5d39046-fc5f-425c-b29e-bac2632da28e')
     def test_list_users_with_invalid_tenant(self):
-        # Should not be able to return a list of all
-        # users for a non-existent tenant
+        """Listing users for a non-existent tenant via v2 API should fail"""
         # Assign invalid tenant ids
         invalid_id = list()
         invalid_id.append(data_utils.rand_name('999'))
diff --git a/tempest/api/identity/admin/v3/test_application_credentials.py b/tempest/api/identity/admin/v3/test_application_credentials.py
index 7e802c6..f5b0356 100644
--- a/tempest/api/identity/admin/v3/test_application_credentials.py
+++ b/tempest/api/identity/admin/v3/test_application_credentials.py
@@ -20,9 +20,11 @@
 
 class ApplicationCredentialsV3AdminTest(base.BaseApplicationCredentialsV3Test,
                                         base.BaseIdentityV3AdminTest):
+    """Test keystone application credentials"""
 
     @decorators.idempotent_id('3b3dd48f-3388-406a-a9e6-4d078a552d0e')
     def test_create_application_credential_with_roles(self):
+        """Test creating keystone application credential with roles"""
         role = self.setup_test_role()
         self.os_admin.roles_v3_client.create_user_role_on_project(
             self.project_id,
@@ -35,7 +37,7 @@
         secret = app_cred['secret']
 
         # Check that the application credential is functional
-        token_id, resp = self.non_admin_token.get_token(
+        _, resp = self.non_admin_token.get_token(
             app_cred_id=app_cred['id'],
             app_cred_secret=secret,
             auth_data=True
diff --git a/tempest/api/identity/admin/v3/test_credentials.py b/tempest/api/identity/admin/v3/test_credentials.py
index 23fe788..441f10f 100644
--- a/tempest/api/identity/admin/v3/test_credentials.py
+++ b/tempest/api/identity/admin/v3/test_credentials.py
@@ -20,6 +20,8 @@
 
 
 class CredentialsTestJSON(base.BaseIdentityV3AdminTest):
+    """Test keystone credentials"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -47,6 +49,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('7cd59bf9-bda4-4c72-9467-d21cab278355')
     def test_credentials_create_get_update_delete(self):
+        """Test creating, getting, updating, deleting of credentials"""
         blob = '{"access": "%s", "secret": "%s"}' % (
             data_utils.rand_name('Access'), data_utils.rand_name('Secret'))
         cred = self.creds_client.create_credential(
@@ -82,6 +85,7 @@
 
     @decorators.idempotent_id('13202c00-0021-42a1-88d4-81b44d448aab')
     def test_credentials_list_delete(self):
+        """Test listing credentials"""
         created_cred_ids = list()
         fetched_cred_ids = list()
 
diff --git a/tempest/api/identity/admin/v3/test_default_project_id.py b/tempest/api/identity/admin/v3/test_default_project_id.py
index 73fddb7..7c3a6cc 100644
--- a/tempest/api/identity/admin/v3/test_default_project_id.py
+++ b/tempest/api/identity/admin/v3/test_default_project_id.py
@@ -22,6 +22,7 @@
 
 
 class TestDefaultProjectId(base.BaseIdentityV3AdminTest):
+    """Test creating a token without project will default to user's project"""
 
     @classmethod
     def setup_credentials(cls):
@@ -35,11 +36,11 @@
         self.domains_client.delete_domain(domain_id)
 
     @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
-                      'Skipped because environment has an '
-                      'immutable user source and solely '
-                      'provides read-only access to users.')
+                      'Skipped because environment has an immutable user '
+                      'source and solely provides read-only access to users.')
     @decorators.idempotent_id('d6110661-6a71-49a7-a453-b5e26640ff6d')
     def test_default_project_id(self):
+        """Creating a token without project will default to user's project"""
         # create a domain
         dom_name = data_utils.rand_name('dom')
         domain_body = self.domains_client.create_domain(
diff --git a/tempest/api/identity/admin/v3/test_domain_configuration.py b/tempest/api/identity/admin/v3/test_domain_configuration.py
index c0b18ca..a246a36 100644
--- a/tempest/api/identity/admin/v3/test_domain_configuration.py
+++ b/tempest/api/identity/admin/v3/test_domain_configuration.py
@@ -21,6 +21,8 @@
 
 
 class DomainConfigurationTestJSON(base.BaseIdentityV3AdminTest):
+    """Test domain configuration"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -51,10 +53,12 @@
 
     @decorators.idempotent_id('11a02bf0-6f94-4380-b3b0-c8dc18fc0d22')
     def test_show_default_group_config_and_options(self):
-        # The API supports only the identity and ldap groups. For the ldap
-        # group, a valid value is url or user_tree_dn. For the identity group,
-        # a valid value is driver.
+        """Test showing default keystone group config and options
 
+        The API supports only the identity and ldap groups. For the ldap
+        group, a valid value is url or user_tree_dn. For the identity group,
+        a valid value is driver.
+        """
         # Check that the default config has the identity and ldap groups.
         config = self.client.show_default_config_settings()['config']
         self.assertIsInstance(config, dict)
@@ -93,6 +97,7 @@
 
     @decorators.idempotent_id('9e3ff13c-f597-4f01-9377-d6c06c2a1477')
     def test_create_domain_config_and_show_config_groups_and_options(self):
+        """Test creating and showing keystone config groups and options"""
         domain, created_config = self._create_domain_and_config(
             self.custom_config)
 
@@ -117,6 +122,7 @@
 
     @decorators.idempotent_id('7161023e-5dd0-4612-9da0-1bac6ac30b63')
     def test_create_update_and_delete_domain_config(self):
+        """Test creating, updating and deleting keystone domain config"""
         domain, created_config = self._create_domain_and_config(
             self.custom_config)
 
@@ -140,6 +146,7 @@
 
     @decorators.idempotent_id('c7510fa2-6661-4170-9c6b-4783a80651e9')
     def test_create_update_and_delete_domain_config_groups_and_opts(self):
+        """Test create/update/delete keystone domain config groups and opts"""
         domain, _ = self._create_domain_and_config(self.custom_config)
 
         # Check that updating configuration groups work.
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 07175f4..419c6c7 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -24,6 +24,7 @@
 
 
 class DomainsTestJSON(base.BaseIdentityV3AdminTest):
+    """Test identity domains"""
 
     @classmethod
     def resource_setup(cls):
@@ -37,7 +38,7 @@
 
     @decorators.idempotent_id('8cf516ef-2114-48f1-907b-d32726c734d4')
     def test_list_domains(self):
-        # Test to list domains
+        """Test listing domains"""
         fetched_ids = list()
         # List and Verify Domains
         body = self.domains_client.list_domains()['domains']
@@ -49,7 +50,7 @@
 
     @decorators.idempotent_id('c6aee07b-4981-440c-bb0b-eb598f58ffe9')
     def test_list_domains_filter_by_name(self):
-        # List domains filtering by name
+        """Test listing domains filtering by name"""
         params = {'name': self.setup_domains[0]['name']}
         fetched_domains = self.domains_client.list_domains(
             **params)['domains']
@@ -61,7 +62,7 @@
 
     @decorators.idempotent_id('3fd19840-65c1-43f8-b48c-51bdd066dff9')
     def test_list_domains_filter_by_enabled(self):
-        # List domains filtering by enabled domains
+        """Test listing domains filtering by enabled domains"""
         params = {'enabled': True}
         fetched_domains = self.domains_client.list_domains(
             **params)['domains']
@@ -74,6 +75,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('f2f5b44a-82e8-4dad-8084-0661ea3b18cf')
     def test_create_update_delete_domain(self):
+        """Test creating, updating and deleting domain"""
         # Create domain
         d_name = data_utils.rand_name('domain')
         d_desc = data_utils.rand_name('domain-desc')
@@ -118,6 +120,7 @@
 
     @decorators.idempotent_id('d8d318b7-d1b3-4c37-94c5-3c5ba0b121ea')
     def test_domain_delete_cascades_content(self):
+        """Test deleting domain will delete its associated contents"""
         # Create a domain with a user and a group in it
         domain = self.setup_test_domain()
         user = self.create_test_user(domain_id=domain['id'])
@@ -134,6 +137,7 @@
 
     @decorators.idempotent_id('036df86e-bb5d-42c0-a7c2-66b9db3a6046')
     def test_create_domain_with_disabled_status(self):
+        """Test creating domain with disabled status"""
         # Create domain with enabled status as false
         d_name = data_utils.rand_name('domain')
         d_desc = data_utils.rand_name('domain-desc')
@@ -146,10 +150,11 @@
 
     @decorators.idempotent_id('2abf8764-309a-4fa9-bc58-201b799817ad')
     def test_create_domain_without_description(self):
+        """Test creating domain without description"""
         # Create domain only with name
         d_name = data_utils.rand_name('domain')
         domain = self.domains_client.create_domain(name=d_name)['domain']
         self.addCleanup(self.delete_domain, domain['id'])
         expected_data = {'name': d_name, 'enabled': True}
         self.assertEqual('', domain['description'])
-        self.assertDictContainsSubset(expected_data, domain)
+        self.assertLessEqual(expected_data.items(), domain.items())
diff --git a/tempest/api/identity/admin/v3/test_domains_negative.py b/tempest/api/identity/admin/v3/test_domains_negative.py
index b3c68fb..c90206d 100644
--- a/tempest/api/identity/admin/v3/test_domains_negative.py
+++ b/tempest/api/identity/admin/v3/test_domains_negative.py
@@ -20,6 +20,8 @@
 
 
 class DomainsNegativeTestJSON(base.BaseIdentityV3AdminTest):
+    """Negative tests of identity domains"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -28,6 +30,7 @@
     @decorators.attr(type=['negative', 'gate'])
     @decorators.idempotent_id('1f3fbff5-4e44-400d-9ca1-d953f05f609b')
     def test_delete_active_domain(self):
+        """Test deleting active domain should fail"""
         domain = self.create_domain()
         domain_id = domain['id']
 
@@ -40,14 +43,20 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9018461d-7d24-408d-b3fe-ae37e8cd5c9e')
     def test_create_domain_with_empty_name(self):
-        # Domain name should not be empty
+        """Test creating domain with empty name should fail
+
+        Domain name should not be empty
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.domains_client.create_domain, name='')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('37b1bbf2-d664-4785-9a11-333438586eae')
     def test_create_domain_with_name_length_over_64(self):
-        # Domain name length should not ne greater than 64 characters
+        """Test creating domain with name over length
+
+        Domain name length should not ne greater than 64 characters
+        """
         d_name = 'a' * 65
         self.assertRaises(lib_exc.BadRequest,
                           self.domains_client.create_domain,
@@ -56,13 +65,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('43781c07-764f-4cf2-a405-953c1916f605')
     def test_delete_non_existent_domain(self):
-        # Attempt to delete a non existent domain should fail
+        """Test attempting to delete a non existent domain should fail"""
         self.assertRaises(lib_exc.NotFound, self.domains_client.delete_domain,
                           data_utils.rand_uuid_hex())
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e6f9e4a2-4f36-4be8-bdbc-4e199ae29427')
     def test_domain_create_duplicate(self):
+        """Test creating domain with duplicate name should fail"""
         domain_name = data_utils.rand_name('domain-dup')
         domain = self.domains_client.create_domain(name=domain_name)['domain']
         domain_id = domain['id']
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index 7d85dc9..2fa92e3 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -20,6 +20,8 @@
 
 
 class EndPointGroupsTest(base.BaseIdentityV3AdminTest):
+    """Test endpoint groups"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -68,6 +70,7 @@
 
     @decorators.idempotent_id('7c69e7a1-f865-402d-a2ea-44493017315a')
     def test_create_list_show_check_delete_endpoint_group(self):
+        """Test create/list/show/check/delete of endpoint group"""
         service_id = self._create_service()
         self.addCleanup(self.services_client.delete_service, service_id)
         name = data_utils.rand_name('service_group')
@@ -127,6 +130,7 @@
 
     @decorators.idempotent_id('51c8fc38-fa84-4e76-b5b6-6fc37770fb26')
     def test_update_endpoint_group(self):
+        """Test updating endpoint group"""
         # Creating an endpoint group so as to check update endpoint group
         # with new values
         service1_id = self._create_service()
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 366d6a0..0199d73 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -20,6 +20,8 @@
 
 
 class EndPointsTestJSON(base.BaseIdentityV3AdminTest):
+    """Test keystone endpoints"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -71,6 +73,7 @@
 
     @decorators.idempotent_id('c19ecf90-240e-4e23-9966-21cee3f6a618')
     def test_list_endpoints(self):
+        """Test listing keystone endpoints by filters"""
         # Get the list of all the endpoints.
         fetched_endpoints = self.client.list_endpoints()['endpoints']
         fetched_endpoint_ids = [e['id'] for e in fetched_endpoints]
@@ -111,6 +114,7 @@
 
     @decorators.idempotent_id('0e2446d2-c1fd-461b-a729-b9e73e3e3b37')
     def test_create_list_show_delete_endpoint(self):
+        """Test creating, listing, showing and deleting keystone endpoint"""
         region_name = data_utils.rand_name('region')
         url = data_utils.rand_url()
         interface = 'public'
@@ -152,6 +156,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('37e8f15e-ee7c-4657-a1e7-f6b61e375eff')
     def test_update_endpoint(self):
+        """Test updating keystone endpoint"""
         # NOTE(zhufl) Service2 should be created before endpoint_for_update
         # is created, because Service2 must be deleted after
         # endpoint_for_update is deleted, otherwise we will get a 404 error
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 164b577..9689d87 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -20,6 +20,8 @@
 
 
 class EndpointsNegativeTestJSON(base.BaseIdentityV3AdminTest):
+    """Negative tests of endpoint"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -48,7 +50,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ac6c137e-4d3d-448f-8c83-4f13d0942651')
     def test_create_with_enabled_False(self):
-        # Enabled should be a boolean, not a string like 'False'
+        """Test creating endpoint with invalid enabled value 'False'
+
+        Enabled should be a boolean, not a string like 'False'
+        """
         interface = 'public'
         url = data_utils.rand_url()
         region = data_utils.rand_name('region')
@@ -59,7 +64,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9c43181e-0627-484a-8c79-923e8a59598b')
     def test_create_with_enabled_True(self):
-        # Enabled should be a boolean, not a string like 'True'
+        """Test creating endpoint with invalid enabled value 'True'
+
+        Enabled should be a boolean, not a string like 'True'
+        """
         interface = 'public'
         url = data_utils.rand_url()
         region = data_utils.rand_name('region')
@@ -88,11 +96,17 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('65e41f32-5eb7-498f-a92a-a6ccacf7439a')
     def test_update_with_enabled_False(self):
-        # Enabled should be a boolean, not a string like 'False'
+        """Test updating endpoint with invalid enabled value 'False'
+
+        Enabled should be a boolean, not a string like 'False'
+        """
         self._assert_update_raises_bad_request('False')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('faba3587-f066-4757-a48e-b4a3f01803bb')
     def test_update_with_enabled_True(self):
-        # Enabled should be a boolean, not a string like 'True'
+        """Test updating endpoint with invalid enabled value 'True'
+
+        Enabled should be a boolean, not a string like 'True'
+        """
         self._assert_update_raises_bad_request('True')
diff --git a/tempest/api/identity/admin/v3/test_groups.py b/tempest/api/identity/admin/v3/test_groups.py
index df0d79d..b2e3775 100644
--- a/tempest/api/identity/admin/v3/test_groups.py
+++ b/tempest/api/identity/admin/v3/test_groups.py
@@ -23,6 +23,8 @@
 
 
 class GroupsV3TestJSON(base.BaseIdentityV3AdminTest):
+    """Test keystone groups"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -35,6 +37,7 @@
 
     @decorators.idempotent_id('2e80343b-6c81-4ac3-88c7-452f3e9d5129')
     def test_group_create_update_get(self):
+        """Test creating, updating and getting keystone group"""
         # Verify group creation works.
         name = data_utils.rand_name('Group')
         description = data_utils.rand_name('Description')
@@ -78,6 +81,7 @@
                       'immutable user source and solely '
                       'provides read-only access to users.')
     def test_group_users_add_list_delete(self):
+        """Test adding/listing/deleting group users"""
         group = self.setup_test_group(domain_id=self.domain['id'])
         # add user into group
         users = []
@@ -104,6 +108,7 @@
                       'immutable user source and solely '
                       'provides read-only access to users.')
     def test_list_user_groups(self):
+        """Test listing user groups when the user is in two groups"""
         # create a user
         user = self.create_test_user()
         # create two groups, and add user into them
@@ -114,13 +119,20 @@
             self.groups_client.add_group_user(group['id'], user['id'])
         # list groups which user belongs to
         user_groups = self.users_client.list_user_groups(user['id'])['groups']
+        # The `membership_expires_at` attribute is present when listing user
+        # group memberships, and is not an attribute of the groups themselves.
+        # Therefore we remove it from the comparison.
+        for g in user_groups:
+            if 'membership_expires_at' in g:
+                self.assertIsNone(g['membership_expires_at'])
+                del g['membership_expires_at']
         self.assertEqual(sorted(groups, key=lambda k: k['name']),
                          sorted(user_groups, key=lambda k: k['name']))
         self.assertEqual(2, len(user_groups))
 
     @decorators.idempotent_id('cc9a57a5-a9ed-4f2d-a29f-4f979a06ec71')
     def test_list_groups(self):
-        # Test to list groups
+        """Test listing groups"""
         group_ids = list()
         fetched_ids = list()
         for _ in range(3):
diff --git a/tempest/api/identity/admin/v3/test_inherits.py b/tempest/api/identity/admin/v3/test_inherits.py
index 2672f71..cababc6 100644
--- a/tempest/api/identity/admin/v3/test_inherits.py
+++ b/tempest/api/identity/admin/v3/test_inherits.py
@@ -21,6 +21,8 @@
 
 
 class InheritsV3TestJSON(base.BaseIdentityV3AdminTest):
+    """Test keystone inherits"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -72,6 +74,7 @@
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_inherit_assign_list_check_revoke_roles_on_domains_user(self):
+        """Test assign/list/check/revoke inherited role on domain user"""
         # Create role
         src_role = self.setup_test_role()
         # Assign role on domains user
@@ -96,6 +99,7 @@
 
     @decorators.idempotent_id('c7a8dda2-be50-4fb4-9a9c-e830771078b1')
     def test_inherit_assign_list_check_revoke_roles_on_domains_group(self):
+        """Test assign/list/check/revoke inherited role on domain group"""
         # Create role
         src_role = self.setup_test_role()
         # Assign role on domains group
@@ -123,6 +127,7 @@
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_inherit_assign_check_revoke_roles_on_projects_user(self):
+        """Test assign/list/check/revoke inherited role on project user"""
         # Create role
         src_role = self.setup_test_role()
         # Assign role on projects user
@@ -138,6 +143,7 @@
 
     @decorators.idempotent_id('26021436-d5a4-4256-943c-ded01e0d4b45')
     def test_inherit_assign_check_revoke_roles_on_projects_group(self):
+        """Test assign/list/check/revoke inherited role on project group"""
         # Create role
         src_role = self.setup_test_role()
         # Assign role on projects group
@@ -157,6 +163,7 @@
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_inherit_assign_list_revoke_user_roles_on_domain(self):
+        """Test assign/list/check/revoke inherited role on domain"""
         # Create role
         src_role = self.setup_test_role()
 
@@ -204,6 +211,7 @@
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_inherit_assign_list_revoke_user_roles_on_project_tree(self):
+        """Test assign/list/check/revoke inherited role on project tree"""
         # Create role
         src_role = self.setup_test_role()
 
diff --git a/tempest/api/identity/admin/v3/test_list_projects.py b/tempest/api/identity/admin/v3/test_list_projects.py
index cb8ea11..b33d8bd 100644
--- a/tempest/api/identity/admin/v3/test_list_projects.py
+++ b/tempest/api/identity/admin/v3/test_list_projects.py
@@ -40,6 +40,7 @@
 
 
 class ListProjectsTestJSON(BaseListProjectsTestJSON):
+    """Test listing projects"""
 
     @classmethod
     def resource_setup(cls):
@@ -65,13 +66,13 @@
 
     @decorators.idempotent_id('0fe7a334-675a-4509-b00e-1c4b95d5dae8')
     def test_list_projects_with_enabled(self):
-        # List the projects with enabled
+        """Test listing the projects with enabled"""
         self._list_projects_with_params(
             [self.p1], [self.p2, self.p3], {'enabled': False}, 'enabled')
 
     @decorators.idempotent_id('6edc66f5-2941-4a17-9526-4073311c1fac')
     def test_list_projects_with_parent(self):
-        # List projects with parent
+        """Test listing projects with parent"""
         params = {'parent_id': self.p3['parent_id']}
         fetched_projects = self.projects_client.list_projects(
             params)['projects']
@@ -81,6 +82,11 @@
 
 
 class ListProjectsStaticTestJSON(BaseListProjectsTestJSON):
+    """Test listing projects
+
+    These tests can be executed in clouds using the pre-provisioned users
+    """
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -102,7 +108,7 @@
 
     @decorators.idempotent_id('1d830662-22ad-427c-8c3e-4ec854b0af44')
     def test_list_projects(self):
-        # List projects
+        """Test listing projects"""
         list_projects = self.projects_client.list_projects()['projects']
 
         for p in [self.p1, self.p2]:
@@ -112,13 +118,13 @@
 
     @decorators.idempotent_id('fa178524-4e6d-4925-907c-7ab9f42c7e26')
     def test_list_projects_with_name(self):
-        # List projects with name
+        """Test listing projects filtered by name"""
         self._list_projects_with_params(
             [self.p1], [self.p2], {'name': self.p1['name']}, 'name')
 
     @decorators.idempotent_id('fab13f3c-f6a6-4b9f-829b-d32fd44fdf10')
     def test_list_projects_with_domains(self):
-        # Verify project list filtered by domain
+        """Test listing projects filtered by domain"""
         key = 'domain_id'
         for p in [self.p1, self.p2]:
             params = {key: p[key]}
diff --git a/tempest/api/identity/admin/v3/test_list_users.py b/tempest/api/identity/admin/v3/test_list_users.py
index 5aec931..7bd0bcf 100644
--- a/tempest/api/identity/admin/v3/test_list_users.py
+++ b/tempest/api/identity/admin/v3/test_list_users.py
@@ -22,6 +22,7 @@
 
 
 class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+    """Test listing keystone users"""
 
     def _list_users_with_params(self, params, key, expected, not_expected):
         # Helper method to list users filtered with params and
@@ -69,7 +70,7 @@
 
     @decorators.idempotent_id('08f9aabb-dcfe-41d0-8172-82b5fa0bd73d')
     def test_list_user_domains(self):
-        # List users with domain
+        """List users with domain"""
         params = {'domain_id': self.domain['id']}
         self._list_users_with_params(params, 'domain_id',
                                      self.domain_enabled_user,
@@ -77,7 +78,7 @@
 
     @decorators.idempotent_id('bff8bf2f-9408-4ef5-b63a-753c8c2124eb')
     def test_list_users_with_not_enabled(self):
-        # List the users with not enabled
+        """List the users with not enabled"""
         params = {'enabled': False}
         self._list_users_with_params(params, 'enabled',
                                      self.non_domain_enabled_user,
@@ -85,7 +86,7 @@
 
     @decorators.idempotent_id('c285bb37-7325-4c02-bff3-3da5d946d683')
     def test_list_users_with_name(self):
-        # List users with name
+        """List users with name"""
         params = {'name': self.domain_enabled_user['name']}
         # When domain specific drivers are enabled the operations
         # of listing all users and listing all groups are not supported,
@@ -98,7 +99,7 @@
 
     @decorators.idempotent_id('b30d4651-a2ea-4666-8551-0c0e49692635')
     def test_list_users(self):
-        # List users
+        """List users"""
         # When domain specific drivers are enabled the operations
         # of listing all users and listing all groups are not supported,
         # they need a domain filter to be specified
@@ -120,7 +121,7 @@
 
     @decorators.idempotent_id('b4baa3ae-ac00-4b4e-9e27-80deaad7771f')
     def test_get_user(self):
-        # Get a user detail
+        """Get a user detail"""
         user = self.users_client.show_user(self.users[0]['id'])['user']
         self.assertEqual(self.users[0]['id'], user['id'])
         self.assertEqual(self.users[0]['name'], user['name'])
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 2908fc4..fb81d0a 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -19,13 +19,14 @@
 
 
 class PoliciesTestJSON(base.BaseIdentityV3AdminTest):
+    """Test keystone policies"""
 
     def _delete_policy(self, policy_id):
         self.policies_client.delete_policy(policy_id)
 
     @decorators.idempotent_id('1a0ad286-2d06-4123-ab0d-728893a76201')
     def test_list_policies(self):
-        # Test to list policies
+        """Test to list keystone policies"""
         policy_ids = list()
         fetched_ids = list()
         for _ in range(3):
@@ -46,7 +47,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('e544703a-2f03-4cf2-9b0f-350782fdb0d3')
     def test_create_update_delete_policy(self):
-        # Test to update policy
+        """Test to update keystone policy"""
         blob = data_utils.rand_name('BlobName')
         policy_type = data_utils.rand_name('PolicyType')
         policy = self.policies_client.create_policy(blob=blob,
diff --git a/tempest/api/identity/admin/v3/test_project_tags.py b/tempest/api/identity/admin/v3/test_project_tags.py
index b7878a8..eed60af 100644
--- a/tempest/api/identity/admin/v3/test_project_tags.py
+++ b/tempest/api/identity/admin/v3/test_project_tags.py
@@ -25,6 +25,8 @@
 
 
 class IdentityV3ProjectTagsTest(base.BaseIdentityV3AdminTest):
+    """Test keystone project tags"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -34,6 +36,7 @@
     @testtools.skipUnless(CONF.identity_feature_enabled.project_tags,
                           'Project tags not available.')
     def test_list_update_delete_project_tags(self):
+        """Test listing, updating and deleting of project tags"""
         project = self.setup_test_project()
 
         # Create a tag for testing.
diff --git a/tempest/api/identity/admin/v3/test_projects.py b/tempest/api/identity/admin/v3/test_projects.py
index e46145d..be1216a 100644
--- a/tempest/api/identity/admin/v3/test_projects.py
+++ b/tempest/api/identity/admin/v3/test_projects.py
@@ -23,6 +23,8 @@
 
 
 class ProjectsTestJSON(base.BaseIdentityV3AdminTest):
+    """Test identity projects"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -30,7 +32,7 @@
 
     @decorators.idempotent_id('0ecf465c-0dc4-4532-ab53-91ffeb74d12d')
     def test_project_create_with_description(self):
-        # Create project with a description
+        """Test creating project with a description"""
         project_desc = data_utils.rand_name('desc')
         project = self.setup_test_project(description=project_desc)
         project_id = project['id']
@@ -44,7 +46,7 @@
 
     @decorators.idempotent_id('5f50fe07-8166-430b-a882-3b2ee0abe26f')
     def test_project_create_with_domain(self):
-        # Create project with a domain
+        """Test creating project with a domain"""
         domain = self.setup_test_domain()
         project_name = data_utils.rand_name('project')
         project = self.setup_test_project(
@@ -58,7 +60,7 @@
 
     @decorators.idempotent_id('1854f9c0-70bc-4d11-a08a-1c789d339e3d')
     def test_project_create_with_parent(self):
-        # Create root project without providing a parent_id
+        """Test creating root project without providing a parent_id"""
         domain = self.setup_test_domain()
         domain_id = domain['id']
 
@@ -83,6 +85,7 @@
 
     @decorators.idempotent_id('a7eb9416-6f9b-4dbb-b71b-7f73aaef59d5')
     def test_create_is_domain_project(self):
+        """Test creating is_domain project"""
         project = self.setup_test_project(domain_id=None, is_domain=True)
         # To delete a domain, we need to disable it first
         self.addCleanup(self.projects_client.update_project, project['id'],
@@ -103,7 +106,7 @@
 
     @decorators.idempotent_id('1f66dc76-50cc-4741-a200-af984509e480')
     def test_project_create_enabled(self):
-        # Create a project that is enabled
+        """Test creating a project that is enabled"""
         project = self.setup_test_project(enabled=True)
         project_id = project['id']
         self.assertTrue(project['enabled'],
@@ -113,7 +116,7 @@
 
     @decorators.idempotent_id('78f96a9c-e0e0-4ee6-a3ba-fbf6dfd03207')
     def test_project_create_not_enabled(self):
-        # Create a project that is not enabled
+        """Test creating a project that is not enabled"""
         project = self.setup_test_project(enabled=False)
         self.assertFalse(project['enabled'],
                          'Enable should be False in response')
@@ -123,7 +126,7 @@
 
     @decorators.idempotent_id('f608f368-048c-496b-ad63-d286c26dab6b')
     def test_project_update_name(self):
-        # Update name attribute of a project
+        """Test updating name attribute of a project"""
         p_name1 = data_utils.rand_name('project')
         project = self.setup_test_project(name=p_name1)
 
@@ -144,7 +147,7 @@
 
     @decorators.idempotent_id('f138b715-255e-4a7d-871d-351e1ef2e153')
     def test_project_update_desc(self):
-        # Update description attribute of a project
+        """Test updating description attribute of a project"""
         p_desc = data_utils.rand_name('desc')
         project = self.setup_test_project(description=p_desc)
         resp1_desc = project['description']
@@ -164,7 +167,7 @@
 
     @decorators.idempotent_id('b6b25683-c97f-474d-a595-55d410b68100')
     def test_project_update_enable(self):
-        # Update the enabled attribute of a project
+        """Test updating the enabled attribute of a project"""
         p_en = False
         project = self.setup_test_project(enabled=p_en)
 
@@ -189,7 +192,7 @@
                       'immutable user source and solely '
                       'provides read-only access to users.')
     def test_associate_user_to_project(self):
-        # Associate a user to a project
+        """Test associating a user to a project"""
         # Create a Project
         project = self.setup_test_project()
 
@@ -215,6 +218,7 @@
 
     @decorators.idempotent_id('d1db68b6-aebe-4fa0-b79d-d724d2e21162')
     def test_project_get_equals_list(self):
+        """Test the result of getting project equals that of listing"""
         fields = ['parent_id', 'is_domain', 'description', 'links',
                   'name', 'enabled', 'domain_id', 'id', 'tags']
 
diff --git a/tempest/api/identity/admin/v3/test_projects_negative.py b/tempest/api/identity/admin/v3/test_projects_negative.py
index 12f1d4a..79e3d29 100644
--- a/tempest/api/identity/admin/v3/test_projects_negative.py
+++ b/tempest/api/identity/admin/v3/test_projects_negative.py
@@ -20,11 +20,12 @@
 
 
 class ProjectsNegativeTestJSON(base.BaseIdentityV3AdminTest):
+    """Negative tests of projects"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8d68c012-89e0-4394-8d6b-ccd7196def97')
     def test_project_delete_by_unauthorized_user(self):
-        # Non-admin user should not be able to delete a project
+        """Non-admin user should not be able to delete a project"""
         project = self.setup_test_project()
         self.assertRaises(
             lib_exc.Forbidden, self.non_admin_projects_client.delete_project,
@@ -32,6 +33,11 @@
 
 
 class ProjectsNegativeStaticTestJSON(base.BaseIdentityV3AdminTest):
+    """Negative tests of projects
+
+    These tests can be executed in clouds using the pre-provisioned users
+    """
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -40,14 +46,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('24c49279-45dd-4155-887a-cb738c2385aa')
     def test_list_projects_by_unauthorized_user(self):
-        # Non-admin user should not be able to list projects
+        """Non-admin user should not be able to list projects"""
         self.assertRaises(lib_exc.Forbidden,
                           self.non_admin_projects_client.list_projects)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('874c3e84-d174-4348-a16b-8c01f599561b')
     def test_project_create_duplicate(self):
-        # Project names should be unique
+        """Project names should be unique"""
         project_name = data_utils.rand_name('project-dup')
         self.setup_test_project(name=project_name)
 
@@ -57,7 +63,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8fba9de2-3e1f-4e77-812a-60cb68f8df13')
     def test_create_project_by_unauthorized_user(self):
-        # Non-admin user should not be authorized to create a project
+        """Non-admin user should not be authorized to create a project"""
         project_name = data_utils.rand_name('project')
         self.assertRaises(
             lib_exc.Forbidden, self.non_admin_projects_client.create_project,
@@ -66,14 +72,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7828db17-95e5-475b-9432-9a51b4aa79a9')
     def test_create_project_with_empty_name(self):
-        # Project name should not be empty
+        """Project name should not be empty"""
         self.assertRaises(lib_exc.BadRequest,
                           self.projects_client.create_project, name='')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('502b6ceb-b0c8-4422-bf53-f08fdb21e2f0')
     def test_create_projects_name_length_over_64(self):
-        # Project name length should not be greater than 64 characters
+        """Project name length should not be greater than 64 characters"""
         project_name = 'a' * 65
         self.assertRaises(lib_exc.BadRequest,
                           self.projects_client.create_project, project_name)
@@ -81,7 +87,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7965b581-60c1-43b7-8169-95d4ab7fc6fb')
     def test_delete_non_existent_project(self):
-        # Attempt to delete a non existent project should fail
+        """Attempt to delete a non existent project should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.projects_client.delete_project,
                           data_utils.rand_uuid_hex())
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
index c8c0151..63e456e 100644
--- a/tempest/api/identity/admin/v3/test_regions.py
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -20,6 +20,8 @@
 
 
 class RegionsTestJSON(base.BaseIdentityV3AdminTest):
+    """Test regions"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -44,6 +46,7 @@
 
     @decorators.idempotent_id('56186092-82e4-43f2-b954-91013218ba42')
     def test_create_update_get_delete_region(self):
+        """Test creating, updating, getting and updating region"""
         # Create region
         r_description = data_utils.rand_name('description')
         region = self.client.create_region(
@@ -81,7 +84,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('2c12c5b5-efcf-4aa5-90c5-bff1ab0cdbe2')
     def test_create_region_with_specific_id(self):
-        # Create a region with a specific id
+        """Test creating region with specific id"""
         r_region_id = data_utils.rand_uuid()
         r_description = data_utils.rand_name('description')
         region = self.client.create_region(
@@ -93,7 +96,7 @@
 
     @decorators.idempotent_id('d180bf99-544a-445c-ad0d-0c0d27663796')
     def test_list_regions(self):
-        # Get a list of regions
+        """Test getting a list of regions"""
         fetched_regions = self.client.list_regions()['regions']
         missing_regions =\
             [e for e in self.setup_regions if e not in fetched_regions]
@@ -104,6 +107,7 @@
 
     @decorators.idempotent_id('2d1057cb-bbde-413a-acdf-e2d265284542')
     def test_list_regions_filter_by_parent_region_id(self):
+        """Test listing regions filtered by parent region id"""
         # Add a sub-region to one of the existing test regions
         r_description = data_utils.rand_name('description')
         region = self.client.create_region(
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index 5ba4c9f..e5137f4 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -25,6 +25,8 @@
 
 
 class RolesV3TestJSON(base.BaseIdentityV3AdminTest):
+    """Test roles"""
+
     # NOTE: force_tenant_isolation is true in the base class by default but
     # overridden to false here to allow test execution for clouds using the
     # pre-provisioned credentials provider.
@@ -75,6 +77,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')
     def test_role_create_update_show_list(self):
+        """Test creating, updating, showing and listing a role"""
         r_name = data_utils.rand_name('Role')
         role = self.roles_client.create_role(name=r_name)['role']
         self.addCleanup(self.roles_client.delete_role, role['id'])
@@ -101,6 +104,7 @@
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_grant_list_revoke_role_to_user_on_project(self):
+        """Test granting, listing, revoking role to user on project"""
         self.roles_client.create_user_role_on_project(self.project['id'],
                                                       self.user_body['id'],
                                                       self.role['id'])
@@ -122,6 +126,7 @@
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_grant_list_revoke_role_to_user_on_domain(self):
+        """Test granting, listing, revoking role to user on domain"""
         self.roles_client.create_user_role_on_domain(
             self.domain['id'], self.user_body['id'], self.role['id'])
 
@@ -137,11 +142,32 @@
         self.roles_client.delete_role_from_user_on_domain(
             self.domain['id'], self.user_body['id'], self.role['id'])
 
+    @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+                      'Skipped because environment has an immutable user '
+                      'source and solely provides read-only access to users.')
+    @decorators.idempotent_id('e5a81737-d294-424d-8189-8664858aae4c')
+    def test_grant_list_revoke_role_to_user_on_system(self):
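+        """Test granting, listing, revoking role to user on system"""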
+        self.roles_client.create_user_role_on_system(
+            self.user_body['id'], self.role['id'])
+
+        roles = self.roles_client.list_user_roles_on_system(
+            self.user_body['id'])['roles']
+
+        self.assertEqual(1, len(roles))
+        self.assertEqual(self.role['id'], roles[0]['id'])
+
+        self.roles_client.check_user_role_existence_on_system(
+            self.user_body['id'], self.role['id'])
+
+        self.roles_client.delete_role_from_user_on_system(
+            self.user_body['id'], self.role['id'])
+
     @decorators.idempotent_id('cbf11737-1904-4690-9613-97bcbb3df1c4')
     @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_grant_list_revoke_role_to_group_on_project(self):
+        """Test granting, listing, revoking role to group on project"""
         # Grant role to group on project
         self.roles_client.create_group_role_on_project(
             self.project['id'], self.group_body['id'], self.role['id'])
@@ -175,6 +201,7 @@
 
     @decorators.idempotent_id('4bf8a70b-e785-413a-ad53-9f91ce02faa7')
     def test_grant_list_revoke_role_to_group_on_domain(self):
+        """Test granting, listing, revoking role to group on domain"""
         self.roles_client.create_group_role_on_domain(
             self.domain['id'], self.group_body['id'], self.role['id'])
 
@@ -190,8 +217,26 @@
         self.roles_client.delete_role_from_group_on_domain(
             self.domain['id'], self.group_body['id'], self.role['id'])
 
+    @decorators.idempotent_id('c888fe4f-8018-48db-b959-542225c1b4b6')
+    def test_grant_list_revoke_role_to_group_on_system(self):
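+        """Test granting, listing, revoking role to group on system"""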
+        self.roles_client.create_group_role_on_system(
+            self.group_body['id'], self.role['id'])
+
+        roles = self.roles_client.list_group_roles_on_system(
+            self.group_body['id'])['roles']
+
+        self.assertEqual(1, len(roles))
+        self.assertEqual(self.role['id'], roles[0]['id'])
+
+        self.roles_client.check_role_from_group_on_system_existence(
+            self.group_body['id'], self.role['id'])
+
+        self.roles_client.delete_role_from_group_on_system(
+            self.group_body['id'], self.role['id'])
+
     @decorators.idempotent_id('f5654bcc-08c4-4f71-88fe-05d64e06de94')
     def test_list_roles(self):
+        """Test listing roles"""
         # Return a list of all roles
         body = self.roles_client.list_roles()['roles']
         found = [role for role in body if role in self.roles]
@@ -215,6 +260,7 @@
 
     @decorators.idempotent_id('c90c316c-d706-4728-bcba-eb1912081b69')
     def test_implied_roles_create_check_show_delete(self):
+        """Test creating, checking, showing and deleting implied roles"""
         prior_role_id = self.roles[0]['id']
         implies_role_id = self.roles[1]['id']
 
@@ -248,6 +294,7 @@
 
     @decorators.idempotent_id('dc6f5959-b74d-4e30-a9e5-a8255494ff00')
     def test_roles_hierarchy(self):
+        """Test creating implied role and listing role inferences rules"""
         # Create inference rule from "roles[0]" to "role[1]"
         self._create_implied_role(
             self.roles[0]['id'], self.roles[1]['id'])
@@ -280,6 +327,7 @@
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_assignments_for_implied_roles_create_delete(self):
+        """Test assignments when implied roles are created and deleted"""
         # Create a grant using "roles[0]"
         self.roles_client.create_user_role_on_project(
             self.project['id'], self.user_body['id'], self.roles[0]['id'])
@@ -321,6 +369,7 @@
 
     @decorators.idempotent_id('d92a41d2-5501-497a-84bb-6e294330e8f8')
     def test_domain_roles_create_delete(self):
+        """Test creating, listing and deleting domain roles"""
         domain_role = self.roles_client.create_role(
             name=data_utils.rand_name('domain_role'),
             domain_id=self.domain['id'])['role']
@@ -341,6 +390,7 @@
 
     @decorators.idempotent_id('eb1e1c24-1bc4-4d47-9748-e127a1852c82')
     def test_implied_domain_roles(self):
+        """Test creating implied roles when roles are in domains"""
         # Create two roles in the same domain
         domain_role1 = self.setup_test_role(domain_id=self.domain['id'])
         domain_role2 = self.setup_test_role(domain_id=self.domain['id'])
@@ -373,6 +423,7 @@
                       'Skipped because environment has an immutable user '
                       'source and solely provides read-only access to users.')
     def test_assignments_for_domain_roles(self):
+        """Test assignments for domain roles"""
         domain_role = self.setup_test_role(domain_id=self.domain['id'])
 
         # Create a grant using "domain_role"
@@ -395,6 +446,7 @@
 
     @decorators.idempotent_id('3748c316-c18f-4b08-997b-c60567bc6235')
     def test_list_all_implied_roles(self):
+        """Test listing all implied roles"""
         # Create inference rule from "roles[0]" to "roles[1]"
         self._create_implied_role(
             self.roles[0]['id'], self.roles[1]['id'])
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index 5afeb98..fb3b03e 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -20,6 +20,7 @@
 
 
 class ServicesTestJSON(base.BaseIdentityV3AdminTest):
+    """Test keystone services"""
 
     def _del_service(self, service_id):
         # Used for deleting the services created in this class
@@ -31,6 +32,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('5193aad5-bcb7-411d-85b0-b3b61b96ef06')
     def test_create_update_get_service(self):
+        """Test creating, updating and getting of keystone service"""
         # Creating a Service
         name = data_utils.rand_name('service')
         serv_type = data_utils.rand_name('type')
@@ -42,7 +44,7 @@
 
         # Verifying response body of create service
         expected_data = {'name': name, 'type': serv_type, 'description': desc}
-        self.assertDictContainsSubset(expected_data, create_service)
+        self.assertLessEqual(expected_data.items(), create_service.items())
 
         # Update description
         s_id = create_service['id']
@@ -59,22 +61,22 @@
         resp3_desc = fetched_service['description']
 
         self.assertEqual(resp2_desc, resp3_desc)
-        self.assertDictContainsSubset(update_service, fetched_service)
+        self.assertLessEqual(update_service.items(), fetched_service.items())
 
     @decorators.idempotent_id('d1dcb1a1-2b6b-4da8-bbb8-5532ef6e8269')
     def test_create_service_without_description(self):
-        # Create a service only with name and type
+        """Create a keystone service only with name and type"""
         name = data_utils.rand_name('service')
         serv_type = data_utils.rand_name('type')
         service = self.services_client.create_service(
             type=serv_type, name=name)['service']
         self.addCleanup(self.services_client.delete_service, service['id'])
         expected_data = {'name': name, 'type': serv_type}
-        self.assertDictContainsSubset(expected_data, service)
+        self.assertLessEqual(expected_data.items(), service.items())
 
     @decorators.idempotent_id('e55908e8-360e-439e-8719-c3230a3e179e')
     def test_list_services(self):
-        # Create, List, Verify and Delete Services
+        """Create, List, Verify and Delete Keystone Services"""
         service_ids = list()
         service_types = list()
         for _ in range(3):
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 5f1b58d..5bbd65c 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
-
 from tempest.api.identity import base
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -24,6 +22,7 @@
 
 
 class TokensV3TestJSON(base.BaseIdentityV3AdminTest):
+    """Test tokens"""
 
     credentials = ['primary', 'admin', 'alt']
 
@@ -69,8 +68,8 @@
         orig_expires_at = token_auth['token']['expires_at']
         orig_user = token_auth['token']['user']
 
-        self.assertIsInstance(token_auth['token']['expires_at'], six.text_type)
-        self.assertIsInstance(token_auth['token']['issued_at'], six.text_type)
+        self.assertIsInstance(token_auth['token']['expires_at'], str)
+        self.assertIsInstance(token_auth['token']['issued_at'], str)
         self.assertEqual(['password'], token_auth['token']['methods'])
         self.assertEqual(user['id'], token_auth['token']['user']['id'])
         self.assertEqual(user['name'], token_auth['token']['user']['name'])
@@ -90,7 +89,7 @@
 
         self.assertEqual(orig_expires_at, token_auth['token']['expires_at'],
                          'Expiration time should match original token')
-        self.assertIsInstance(token_auth['token']['issued_at'], six.text_type)
+        self.assertIsInstance(token_auth['token']['issued_at'], str)
         self.assertEqual(set(['password', 'token']),
                          set(token_auth['token']['methods']))
         self.assertEqual(orig_user, token_auth['token']['user'],
@@ -123,6 +122,7 @@
 
     @decorators.idempotent_id('08ed85ce-2ba8-4864-b442-bcc61f16ae89')
     def test_get_available_project_scopes(self):
+        """Test getting available project scopes"""
         manager_project_id = self.os_primary.credentials.project_id
         admin_user_id = self.os_admin.credentials.user_id
         admin_role_id = self.get_role_by_name(CONF.identity.admin_role)['id']
@@ -152,10 +152,13 @@
 
     @decorators.idempotent_id('ec5ecb05-af64-4c04-ac86-4d9f6f12f185')
     def test_get_available_domain_scopes(self):
-        # Test for verifying that listing domain scopes for a user works if
-        # the user has a domain role or belongs to a group that has a domain
-        # role. For this test, admin client is used to add roles to alt user,
-        # which performs API calls, to avoid 401 Unauthorized errors.
+        """Test getting available domain scopes
+
+        Verify that listing domain scopes for a user works if the user has
+        a domain role or belongs to a group that has a domain role. For this
+        test, the admin client is used to add roles to the alt user, which
+        performs the API calls, to avoid 401 Unauthorized errors.
+        """
         alt_user_id = self.os_alt.credentials.user_id
 
         def _create_user_domain_role_for_alt_user():
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index 78e3cce..580e304 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -27,6 +27,7 @@
 
 
 class TrustsV3TestJSON(base.BaseIdentityV3AdminTest):
+    """Test keystone trusts"""
 
     @classmethod
     def skip_checks(cls):
@@ -195,8 +196,11 @@
 
     @decorators.idempotent_id('5a0a91a4-baef-4a14-baba-59bf4d7fcace')
     def test_trust_impersonate(self):
-        # Test case to check we can create, get and delete a trust
-        # updates are not supported for trusts
+        """Test keystone trust with impersonation enabled
+
+        To check we can create, get and delete a trust.
+        Updates are not supported for trusts
+        """
         trust = self.create_trust()
         self.validate_trust(trust)
 
@@ -207,8 +211,11 @@
 
     @decorators.idempotent_id('ed2a8779-a7ac-49dc-afd7-30f32f936ed2')
     def test_trust_noimpersonate(self):
-        # Test case to check we can create, get and delete a trust
-        # with impersonation=False
+        """Test keystone trust with impersonation disabled
+
+        To check we can create, get and delete a trust
+        with impersonation=False
+        """
         trust = self.create_trust(impersonate=False)
         self.validate_trust(trust, impersonate=False)
 
@@ -219,8 +226,11 @@
 
     @decorators.idempotent_id('0ed14b66-cefd-4b5c-a964-65759453e292')
     def test_trust_expire(self):
-        # Test case to check we can create, get and delete a trust
-        # with an expiry specified
+        """Test expire attribute of keystone trust
+
+        To check we can create, get and delete a trust
+        with an expiry specified
+        """
         expires_at = timeutils.utcnow() + datetime.timedelta(hours=1)
         # NOTE(ylobankov) In some cases the expiry time may be rounded up
         # because of microseconds. In fact, it depends on database and its
@@ -246,8 +256,10 @@
 
     @decorators.idempotent_id('3e48f95d-e660-4fa9-85e0-5a3d85594384')
     def test_trust_expire_invalid(self):
-        # Test case to check we can check an invalid expiry time
-        # is rejected with the correct error
+        """Test invalid expire attribute of a keystone trust
+
+        To check an invalid expiry time is rejected with the correct error
+        """
         # with an expiry specified
         expires_str = 'bad.123Z'
         self.assertRaises(lib_exc.BadRequest,
@@ -256,6 +268,7 @@
 
     @decorators.idempotent_id('6268b345-87ca-47c0-9ce3-37792b43403a')
     def test_get_trusts_query(self):
+        """Test getting keystone trusts"""
         self.create_trust()
         trusts_get = self.trustor_client.list_trusts(
             trustor_user_id=self.trustor_user_id)['trusts']
@@ -265,7 +278,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('4773ebd5-ecbf-4255-b8d8-b63e6f72b65d')
     def test_get_trusts_all(self):
-
+        """Test getting all keystone trusts"""
         # Simple function that can be used for cleanup
         def set_scope(auth_provider, scope):
             auth_provider.scope = scope
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index 8955a93..31cbbac 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -27,6 +27,7 @@
 
 
 class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
+    """Test keystone users"""
 
     @classmethod
     def skip_checks(cls):
@@ -38,7 +39,7 @@
 
     @decorators.idempotent_id('b537d090-afb9-4519-b95d-270b0708e87e')
     def test_user_update(self):
-        # Test case to check if updating of user attributes is successful.
+        """Test case to check if updating of user attributes is successful"""
         # Creating first user
         u_name = data_utils.rand_name('user')
         u_desc = u_name + 'description'
@@ -72,6 +73,7 @@
 
     @decorators.idempotent_id('2d223a0e-e457-4a70-9fb1-febe027a0ff9')
     def test_update_user_password(self):
+        """Test updating user password"""
         # Creating User to check password updation
         u_name = data_utils.rand_name('user')
         original_password = data_utils.rand_password()
@@ -98,7 +100,7 @@
 
     @decorators.idempotent_id('a831e70c-e35b-430b-92ed-81ebbc5437b8')
     def test_list_user_projects(self):
-        # List the projects that a user has access upon
+        """Test listing the projects that a user has access upon"""
         assigned_project_ids = list()
         fetched_project_ids = list()
         u_project = self.setup_test_project()
@@ -141,7 +143,7 @@
 
     @decorators.idempotent_id('c10dcd90-461d-4b16-8e23-4eb836c00644')
     def test_get_user(self):
-        # Get a user detail
+        """Test getting a user detail"""
         user = self.setup_test_user()
         fetched_user = self.users_client.show_user(user['id'])['user']
         self.assertEqual(user['id'], fetched_user['id'])
@@ -150,6 +152,7 @@
                           'Security compliance not available.')
     @decorators.idempotent_id('568cd46c-ee6c-4ab4-a33a-d3791931979e')
     def test_password_history_not_enforced_in_admin_reset(self):
+        """Test setting same password when password history is not enforced"""
         old_password = self.os_primary.credentials.password
         user_id = self.os_primary.credentials.user_id
 
diff --git a/tempest/api/identity/admin/v3/test_users_negative.py b/tempest/api/identity/admin/v3/test_users_negative.py
index 11dcdb0..1cba945 100644
--- a/tempest/api/identity/admin/v3/test_users_negative.py
+++ b/tempest/api/identity/admin/v3/test_users_negative.py
@@ -23,11 +23,12 @@
 
 
 class UsersNegativeTest(base.BaseIdentityV3AdminTest):
+    """Negative tests of keystone users"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e75f006c-89cc-477b-874d-588e4eab4b17')
     def test_create_user_for_non_existent_domain(self):
-        # Attempt to create a user in a non-existent domain should fail
+        """Attempt to create a user in a non-existent domain should fail"""
         u_name = data_utils.rand_name('user')
         u_email = u_name + '@testmail.tm'
         u_password = data_utils.rand_password()
@@ -39,7 +40,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b3c9fccc-4134-46f5-b600-1da6fb0a3b1f')
     def test_authentication_for_disabled_user(self):
-        # Attempt to authenticate for disabled user should fail
+        """Attempt to authenticate for disabled user should fail"""
         password = data_utils.rand_password()
         user = self.setup_test_user(password)
         self.disable_user(user['name'], user['domain_id'])
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 282343c..5722f0e 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -192,6 +192,7 @@
             cls.os_primary.identity_versions_v3_client
         cls.non_admin_app_creds_client = \
             cls.os_primary.application_credentials_client
+        cls.non_admin_access_rules_client = cls.os_primary.access_rules_client
 
 
 class BaseIdentityV3AdminTest(BaseIdentityV3Test):
diff --git a/tempest/api/identity/v2/test_api_discovery.py b/tempest/api/identity/v2/test_api_discovery.py
index 5b9d38c..afda104 100644
--- a/tempest/api/identity/v2/test_api_discovery.py
+++ b/tempest/api/identity/v2/test_api_discovery.py
@@ -18,11 +18,12 @@
 
 
 class TestApiDiscovery(base.BaseIdentityV2Test):
-    """Tests for API discovery features."""
+    """Tests for identity v2 API discovery features."""
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('ea889a68-a15f-4166-bfb1-c12456eae853')
     def test_api_version_resources(self):
+        """Test showing identity v2 api version resources"""
         descr = self.non_admin_client.show_api_description()['version']
         expected_resources = ('id', 'links', 'media-types', 'status',
                               'updated')
@@ -34,6 +35,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('007a0be0-78fe-4fdb-bbee-e9216cc17bb2')
     def test_api_media_types(self):
+        """Test showing identity v2 api version media type"""
         descr = self.non_admin_client.show_api_description()['version']
         # Get MIME type bases and descriptions
         media_types = [(media_type['base'], media_type['type']) for
@@ -49,6 +51,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('77fd6be0-8801-48e6-b9bf-38cdd2f253ec')
     def test_api_version_statuses(self):
+        """Test showing identity v2 api version status"""
         descr = self.non_admin_client.show_api_description()['version']
         status = descr['status'].lower()
         supported_statuses = ['current', 'stable', 'experimental',
diff --git a/tempest/api/identity/v2/test_extension.py b/tempest/api/identity/v2/test_extension.py
index c538c14..13555bd 100644
--- a/tempest/api/identity/v2/test_extension.py
+++ b/tempest/api/identity/v2/test_extension.py
@@ -18,10 +18,11 @@
 
 
 class ExtensionTestJSON(base.BaseIdentityV2Test):
+    """Test extensions in identity v2 API"""
 
     @decorators.idempotent_id('85f3f661-f54c-4d48-b563-72ae952b9383')
     def test_list_extensions(self):
-        # List all the extensions
+        """List all the identity extensions via v2 API"""
         body = self.non_admin_client.list_extensions()['extensions']['values']
         self.assertNotEmpty(body)
         keys = ['name', 'updated', 'alias', 'links',
diff --git a/tempest/api/identity/v2/test_tenants.py b/tempest/api/identity/v2/test_tenants.py
index b2a6d13..1752b65 100644
--- a/tempest/api/identity/v2/test_tenants.py
+++ b/tempest/api/identity/v2/test_tenants.py
@@ -19,11 +19,13 @@
 
 
 class IdentityTenantsTest(base.BaseIdentityV2Test):
+    """Test listing tenants in identity v2 API"""
 
     credentials = ['primary', 'alt']
 
     @decorators.idempotent_id('ecae2459-243d-4ba1-ad02-65f15dc82b78')
     def test_list_tenants_returns_only_authorized_tenants(self):
+        """Test listing tenants only returns authorized tenants via v2 API"""
         alt_tenant_name = self.os_alt.credentials.tenant_name
         resp = self.non_admin_tenants_client.list_tenants()
 
diff --git a/tempest/api/identity/v2/test_tokens.py b/tempest/api/identity/v2/test_tokens.py
index 64b81c2..d3776b8 100644
--- a/tempest/api/identity/v2/test_tokens.py
+++ b/tempest/api/identity/v2/test_tokens.py
@@ -14,16 +14,16 @@
 #    under the License.
 
 from oslo_utils import timeutils
-import six
 from tempest.api.identity import base
 from tempest.lib import decorators
 
 
 class TokensTest(base.BaseIdentityV2Test):
+    """Test tokens in identity v2 API"""
 
     @decorators.idempotent_id('65ae3b78-91ff-467b-a705-f6678863b8ec')
     def test_create_token(self):
-
+        """Test creating token for user via v2 API"""
         token_client = self.non_admin_token_client
 
         # get a token for the user
@@ -35,7 +35,7 @@
         body = token_client.auth(username, password, tenant_name)
 
         self.assertNotEmpty(body['token']['id'])
-        self.assertIsInstance(body['token']['id'], six.string_types)
+        self.assertIsInstance(body['token']['id'], str)
 
         now = timeutils.utcnow()
         expires_at = timeutils.normalize_time(
diff --git a/tempest/api/identity/v2/test_users.py b/tempest/api/identity/v2/test_users.py
index 2eea860..a63b45c 100644
--- a/tempest/api/identity/v2/test_users.py
+++ b/tempest/api/identity/v2/test_users.py
@@ -28,6 +28,7 @@
 
 
 class IdentityUsersTest(base.BaseIdentityV2Test):
+    """Test user password in identity v2 API"""
 
     @classmethod
     def resource_setup(cls):
@@ -85,6 +86,7 @@
                       'immutable user source and solely '
                       'provides read-only access to users.')
     def test_user_update_own_password(self):
+        """test updating user's own password via v2 API"""
         old_pass = self.creds.password
         old_token = self.non_admin_users_client.token
         new_pass = data_utils.rand_password()
diff --git a/tempest/api/identity/v3/test_access_rules.py b/tempest/api/identity/v3/test_access_rules.py
new file mode 100644
index 0000000..608eb59
--- /dev/null
+++ b/tempest/api/identity/v3/test_access_rules.py
@@ -0,0 +1,84 @@
+# Copyright 2019 SUSE LLC
+#
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.identity import base
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+CONF = config.CONF
+
+
+class AccessRulesV3Test(base.BaseIdentityV3Test):
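+    """Test application credential access rules"""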
+
+    @classmethod
+    def skip_checks(cls):
+        super(AccessRulesV3Test, cls).skip_checks()
+        if not CONF.identity_feature_enabled.access_rules:
+            raise cls.skipException("Application credential access rules are "
+                                    "not available in this environment")
+
+    @classmethod
+    def resource_setup(cls):
+        super(AccessRulesV3Test, cls).resource_setup()
+        cls.user_id = cls.os_primary.credentials.user_id
+        cls.project_id = cls.os_primary.credentials.project_id
+
+    def setUp(self):
+        super(AccessRulesV3Test, self).setUp()
+        ac = self.non_admin_app_creds_client
+        access_rules = [
+            {
+                "path": "/v2.1/servers/*/ips",
+                "method": "GET",
+                "service": "compute"
+            }
+        ]
+        self.app_cred = ac.create_application_credential(
+            self.user_id,
+            name=data_utils.rand_name('application_credential'),
+            access_rules=access_rules
+        )['application_credential']
+
+    @decorators.idempotent_id('2354c498-5119-4ba5-9f0d-44f16f78fb0e')
+    def test_list_access_rules(self):
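+        """Test listing access rules"""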
+        ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+        self.assertEqual(1, len(ar['access_rules']))
+
+    @decorators.idempotent_id('795dd507-ca1e-40e9-ba90-ff0a08689ba4')
+    def test_show_access_rule(self):
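+        """Test showing access rule"""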
+        access_rule_id = self.app_cred['access_rules'][0]['id']
+        self.non_admin_access_rules_client.show_access_rule(
+            self.user_id, access_rule_id)
+
+    @decorators.idempotent_id('278757e9-e193-4bf8-adf2-0b0a229a17d0')
+    def test_delete_access_rule(self):
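+        """Test deleting access rule"""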
+        access_rule_id = self.app_cred['access_rules'][0]['id']
+        app_cred_id = self.app_cred['id']
+        self.assertRaises(
+            lib_exc.Forbidden,
+            self.non_admin_access_rules_client.delete_access_rule,
+            self.user_id,
+            access_rule_id)
+        self.non_admin_app_creds_client.delete_application_credential(
+            self.user_id, app_cred_id)
+        ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+        self.assertEqual(1, len(ar['access_rules']))
+        self.non_admin_access_rules_client.delete_access_rule(
+            self.user_id, access_rule_id)
+        ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+        self.assertEqual(0, len(ar['access_rules']))
diff --git a/tempest/api/identity/v3/test_api_discovery.py b/tempest/api/identity/v3/test_api_discovery.py
index e87d1cd..ebb96fd 100644
--- a/tempest/api/identity/v3/test_api_discovery.py
+++ b/tempest/api/identity/v3/test_api_discovery.py
@@ -22,10 +22,11 @@
 
 
 class TestApiDiscovery(base.BaseIdentityV3Test):
-    """Tests for API discovery features."""
+    """Tests for identity API discovery features."""
 
     @decorators.idempotent_id('79aec9ae-710f-4c54-a4fc-3aa25b4feac3')
     def test_identity_v3_existence(self):
+        """Test that identity v3 version should exist"""
         versions = self.non_admin_versions_client.list_versions()
         found = any(
             "v3" in version.get('id')
@@ -35,9 +36,12 @@
     @decorators.idempotent_id('721f480f-35b6-46c7-846e-047e6acea0dc')
     @decorators.attr(type='smoke')
     def test_list_api_versions(self):
-        # NOTE: Actually this API doesn't depend on v3 API at all, because
-        # the API operation is "GET /" without v3's endpoint. The reason of
-        # this test path is just v3 API is CURRENT on Keystone side.
+        """Test listing identity api versions
+
+        NOTE: Actually this API doesn't depend on the v3 API at all, because
+        the API operation is "GET /" without the v3 endpoint. This test lives
+        under the v3 path only because the v3 API is CURRENT on the Keystone
+        side.
+        """
         versions = self.non_admin_versions_client.list_versions()
         expected_resources = ('id', 'links', 'media-types', 'status',
                               'updated')
@@ -49,6 +53,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('b9232f5e-d9e5-4d97-b96c-28d3db4de1bd')
     def test_api_version_resources(self):
+        """Test showing identity v3 api version resources"""
         descr = self.non_admin_client.show_api_description()['version']
         expected_resources = ('id', 'links', 'media-types', 'status',
                               'updated')
@@ -60,6 +65,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('657c1970-4722-4189-8831-7325f3bc4265')
     def test_api_media_types(self):
+        """Test showing identity v3 api version media type"""
         descr = self.non_admin_client.show_api_description()['version']
         # Get MIME type bases and descriptions
         media_types = [(media_type['base'], media_type['type']) for
@@ -75,6 +81,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('8879a470-abfb-47bb-bb8d-5a7fd279ad1e')
     def test_api_version_statuses(self):
+        """Test showing identity v3 api version status"""
         descr = self.non_admin_client.show_api_description()['version']
         status = descr['status'].lower()
         supported_statuses = ['current', 'stable', 'experimental',
diff --git a/tempest/api/identity/v3/test_application_credentials.py b/tempest/api/identity/v3/test_application_credentials.py
index 1cee902..06734aa 100644
--- a/tempest/api/identity/v3/test_application_credentials.py
+++ b/tempest/api/identity/v3/test_application_credentials.py
@@ -19,10 +19,14 @@
 from oslo_utils import timeutils
 
 from tempest.api.identity import base
+from tempest import config
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class ApplicationCredentialsV3Test(base.BaseApplicationCredentialsV3Test):
+    """Test application credentials"""
 
     def _list_app_creds(self, name=None):
         kwargs = dict(user_id=self.user_id)
@@ -33,6 +37,7 @@
 
     @decorators.idempotent_id('8080c75c-eddc-4786-941a-c2da7039ae61')
     def test_create_application_credential(self):
+        """Test creating application credential"""
         app_cred = self.create_application_credential()
 
         # Check that the secret appears in the create response
@@ -46,7 +51,7 @@
         self.assertNotIn('secret', app_cred)
 
         # Check that the application credential is functional
-        token_id, resp = self.non_admin_token.get_token(
+        _, resp = self.non_admin_token.get_token(
             app_cred_id=app_cred['id'],
             app_cred_secret=secret,
             auth_data=True
@@ -55,6 +60,7 @@
 
     @decorators.idempotent_id('852daf0c-42b5-4239-8466-d193d0543ed3')
     def test_create_application_credential_expires(self):
+        """Test creating application credential with expire time"""
         expires_at = timeutils.utcnow() + datetime.timedelta(hours=1)
 
         app_cred = self.create_application_credential(expires_at=expires_at)
@@ -62,8 +68,27 @@
         expires_str = expires_at.isoformat()
         self.assertEqual(expires_str, app_cred['expires_at'])
 
+    @decorators.idempotent_id('529936eb-aa5d-463d-9f79-01c113d3b88f')
+    def test_create_application_credential_access_rules(self):
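+        """Test creating application credential with access rules"""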
+        if not CONF.identity_feature_enabled.access_rules:
+            raise self.skipException("Application credential access rules are "
+                                     "not available in this environment")
+        access_rules = [
+            {
+                "path": "/v2.1/servers/*/ips",
+                "method": "GET",
+                "service": "compute"
+            }
+        ]
+        app_cred = self.create_application_credential(
+            access_rules=access_rules)
+        access_rule_resp = app_cred['access_rules'][0]
+        access_rule_resp.pop('id')
+        self.assertDictEqual(access_rules[0], access_rule_resp)
+
     @decorators.idempotent_id('ff0cd457-6224-46e7-b79e-0ada4964a8a6')
     def test_list_application_credentials(self):
+        """Test listing application credentials"""
         self.create_application_credential()
         self.create_application_credential()
 
@@ -72,6 +97,7 @@
 
     @decorators.idempotent_id('9bb5e5cc-5250-493a-8869-8b665f6aa5f6')
     def test_query_application_credentials(self):
+        """Test listing application credentials filtered by name"""
         self.create_application_credential()
         app_cred_two = self.create_application_credential()
         app_cred_two_name = app_cred_two['name']
diff --git a/tempest/api/identity/v3/test_catalog.py b/tempest/api/identity/v3/test_catalog.py
index bc95f0d..ce6adf9 100644
--- a/tempest/api/identity/v3/test_catalog.py
+++ b/tempest/api/identity/v3/test_catalog.py
@@ -19,9 +19,11 @@
 
 
 class IdentityCatalogTest(base.BaseIdentityV3Test):
+    """Test service's catalog type values"""
 
     @decorators.idempotent_id('56b57ced-22b8-4127-9b8a-565dfb0207e2')
     def test_catalog_standardization(self):
+        """Test that every service has a standard catalog type value"""
         # https://opendev.org/openstack/service-types-authority
         # /src/branch/master/service-types.yaml
         standard_service_values = [{'name': 'keystone', 'type': 'identity'},
@@ -31,11 +33,9 @@
         # next, we need to GET the catalog using the catalog client
         catalog = self.non_admin_catalog_client.show_catalog()['catalog']
         # get list of the service types present in the catalog
-        catalog_services = []
-        for service in catalog:
-            catalog_services.append(service['type'])
+        catalog_services = [service['type'] for service in catalog]
         for service in standard_service_values:
-            # if service enabled, check if it has a standard typevalue
+            # if service enabled, check if it has a standard type value
             if service['name'] == 'keystone' or\
                     getattr(CONF.service_available, service['name']):
                 self.assertIn(service['type'], catalog_services)
diff --git a/tempest/api/identity/v3/test_domains.py b/tempest/api/identity/v3/test_domains.py
index 9f132dd..bb62ea6 100644
--- a/tempest/api/identity/v3/test_domains.py
+++ b/tempest/api/identity/v3/test_domains.py
@@ -21,6 +21,7 @@
 
 
 class DefaultDomainTestJSON(base.BaseIdentityV3Test):
+    """Test identity default domains"""
 
     @classmethod
     def setup_clients(cls):
@@ -35,5 +36,6 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5')
     def test_default_domain_exists(self):
+        """Test showing default domain"""
         domain = self.domains_client.show_domain(self.domain_id)['domain']
         self.assertTrue(domain['enabled'])
diff --git a/tempest/api/identity/v3/test_ec2_credentials.py b/tempest/api/identity/v3/test_ec2_credentials.py
new file mode 100644
index 0000000..a2cbc4a
--- /dev/null
+++ b/tempest/api/identity/v3/test_ec2_credentials.py
@@ -0,0 +1,113 @@
+# Copyright 2020 SUSE LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.identity import base
+from tempest.common import utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+class EC2CredentialsTest(base.BaseIdentityV3Test):
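+    """Test ec2 credentials"""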
+
+    @classmethod
+    def skip_checks(cls):
+        super(EC2CredentialsTest, cls).skip_checks()
+        if not utils.is_extension_enabled('OS-EC2', 'identity'):
+            msg = "OS-EC2 identity extension not enabled."
+            raise cls.skipException(msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(EC2CredentialsTest, cls).resource_setup()
+        cls.creds = cls.os_primary.credentials
+
+    @decorators.idempotent_id('b0f55a29-54e5-4166-999d-712347e0c920')
+    def test_create_ec2_credential(self):
+        """Create user ec2 credential."""
+        resp = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        access = resp['access']
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, access)
+        self.assertNotEmpty(resp['access'])
+        self.assertNotEmpty(resp['secret'])
+        self.assertEqual(self.creds.user_id, resp['user_id'])
+        self.assertEqual(self.creds.tenant_id, resp['tenant_id'])
+
+    @decorators.idempotent_id('897813f0-160c-4fdc-aabc-24ee635ce4a9')
+    def test_list_ec2_credentials(self):
+        """Get the list of user ec2 credentials."""
+        created_creds = []
+        # create first ec2 credentials
+        creds1 = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        created_creds.append(creds1['access'])
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, creds1['access'])
+
+        # create second ec2 credentials
+        creds2 = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        created_creds.append(creds2['access'])
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, creds2['access'])
+
+        # get the list of user ec2 credentials
+        resp = self.non_admin_users_client.list_user_ec2_credentials(
+            self.creds.user_id)["credentials"]
+        fetched_creds = [cred['access'] for cred in resp]
+        # created credentials should be in a fetched list
+        missing = [cred for cred in created_creds
+                   if cred not in fetched_creds]
+        self.assertEmpty(missing,
+                         "Failed to find ec2_credentials %s in fetched list" %
+                         ', '.join(cred for cred in missing))
+
+    @decorators.idempotent_id('8b8d1010-5958-48df-a6cd-5e3df72e6bcf')
+    def test_show_ec2_credential(self):
+        """Get the definite user ec2 credential."""
+        resp = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, resp['access'])
+
+        ec2_creds = self.non_admin_users_client.show_user_ec2_credential(
+            self.creds.user_id, resp['access']
+        )["credential"]
+        for key in ['access', 'secret', 'user_id', 'tenant_id']:
+            self.assertEqual(ec2_creds[key], resp[key])
+
+    @decorators.idempotent_id('9408d61b-8be0-4a8d-9b85-14f61edb456b')
+    def test_delete_ec2_credential(self):
+        """Delete user ec2 credential."""
+        resp = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        access = resp['access']
+        self.non_admin_users_client.delete_user_ec2_credential(
+            self.creds.user_id, access)
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.non_admin_users_client.show_user_ec2_credential,
+            self.creds.user_id,
+            access)
diff --git a/tempest/api/identity/v3/test_projects.py b/tempest/api/identity/v3/test_projects.py
index bbb4013..338b57b 100644
--- a/tempest/api/identity/v3/test_projects.py
+++ b/tempest/api/identity/v3/test_projects.py
@@ -19,11 +19,13 @@
 
 
 class IdentityV3ProjectsTest(base.BaseIdentityV3Test):
+    """Test identity projects"""
 
     credentials = ['primary', 'alt']
 
     @decorators.idempotent_id('86128d46-e170-4644-866a-cc487f699e1d')
     def test_list_projects_returns_only_authorized_projects(self):
+        """Test listing projects only returns authorized projects"""
         alt_project_name = self.os_alt.credentials.project_name
         resp = self.non_admin_users_client.list_user_projects(
             self.os_primary.credentials.user_id)
diff --git a/tempest/api/identity/v3/test_tokens.py b/tempest/api/identity/v3/test_tokens.py
index fa1c47f..55fcead 100644
--- a/tempest/api/identity/v3/test_tokens.py
+++ b/tempest/api/identity/v3/test_tokens.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import operator
+
 from oslo_utils import timeutils
-import six
 
 from tempest.api.identity import base
 from tempest.lib import decorators
@@ -22,9 +23,11 @@
 
 
 class TokensV3Test(base.BaseIdentityV3Test):
+    """Test identity tokens"""
 
     @decorators.idempotent_id('a9512ac3-3909-48a4-b395-11f438e16260')
     def test_validate_token(self):
+        """Test validating token for user"""
         creds = self.os_primary.credentials
         user_id = creds.user_id
         username = creds.username
@@ -40,6 +43,15 @@
         authenticated_token = self.non_admin_client.show_token(
             subject_token)['token']
         # sanity checking to make sure they are indeed the same token
+        # If there are roles in the token, sort the roles
+        authenticated_token_roles = authenticated_token.get("roles")
+        if authenticated_token_roles:
+            authenticated_token["roles"] = authenticated_token_roles.sort(
+                key=operator.itemgetter('id'))
+        token_body_roles = token_body.get("roles")
+        if token_body_roles:
+            token_body["roles"] = token_body_roles.sort(
+                key=operator.itemgetter('id'))
         self.assertEqual(authenticated_token, token_body)
         # test to see if token has been properly authenticated
         self.assertEqual(authenticated_token['user']['id'], user_id)
@@ -58,7 +70,7 @@
 
     @decorators.idempotent_id('6f8e4436-fc96-4282-8122-e41df57197a9')
     def test_create_token(self):
-
+        """Test creating token for user"""
         creds = self.os_primary.credentials
         user_id = creds.user_id
         username = creds.username
@@ -75,7 +87,7 @@
             auth_data=True)
 
         self.assertNotEmpty(token_id)
-        self.assertIsInstance(token_id, six.string_types)
+        self.assertIsInstance(token_id, str)
 
         now = timeutils.utcnow()
         expires_at = timeutils.normalize_time(
@@ -109,9 +121,12 @@
 
     @decorators.idempotent_id('0f9f5a5f-d5cd-4a86-8a5b-c5ded151f212')
     def test_token_auth_creation_existence_deletion(self):
-        # Tests basic token auth functionality in a way that is compatible with
-        # pre-provisioned credentials. The default user is used for token
-        # authentication.
+        """Test auth/check existence/delete token for user
+
+        Tests basic token auth functionality in a way that is compatible with
+        pre-provisioned credentials. The default user is used for token
+        authentication.
+        """
 
         # Valid user's token is authenticated
         user = self.os_primary.credentials
diff --git a/tempest/api/identity/v3/test_users.py b/tempest/api/identity/v3/test_users.py
index d4e7612..dc6dd4a 100644
--- a/tempest/api/identity/v3/test_users.py
+++ b/tempest/api/identity/v3/test_users.py
@@ -28,6 +28,7 @@
 
 
 class IdentityV3UsersTest(base.BaseIdentityV3Test):
+    """Test identity user password"""
 
     @classmethod
     def resource_setup(cls):
@@ -76,12 +77,15 @@
         time.sleep(1)
         self.non_admin_users_client.auth_provider.set_auth()
 
+    @testtools.skipUnless(CONF.identity_feature_enabled.security_compliance,
+                          'Security compliance not available.')
     @decorators.idempotent_id('ad71bd23-12ad-426b-bb8b-195d2b635f27')
     @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
                       'Skipped because environment has an '
                       'immutable user source and solely '
                       'provides read-only access to users.')
     def test_user_update_own_password(self):
+        """Test updating user's own password"""
         old_pass = self.creds.password
         old_token = self.non_admin_client.token
         new_pass = data_utils.rand_password()
@@ -111,6 +115,7 @@
                       'immutable user source and solely '
                       'provides read-only access to users.')
     def test_password_history_check_self_service_api(self):
+        """Test checking password changing history"""
         old_pass = self.creds.password
         new_pass1 = data_utils.rand_password()
         new_pass2 = data_utils.rand_password()
@@ -141,6 +146,7 @@
                           'Security compliance not available.')
     @decorators.idempotent_id('a7ad8bbf-2cff-4520-8c1d-96332e151658')
     def test_user_account_lockout(self):
+        """Test locking out user account after failure attempts"""
         if (CONF.identity.user_lockout_failure_attempts <= 0 or
                 CONF.identity.user_lockout_duration <= 0):
             raise self.skipException(
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index ae7b3e4..23e7fd8 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -12,12 +12,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.common import image as common_image
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions
 import tempest.test
 
 CONF = config.CONF
@@ -112,7 +113,7 @@
         cls.alt_tenant_id = cls.alt_image_member_client.tenant_id
 
     def _create_image(self):
-        image_file = six.BytesIO(data_utils.random_bytes())
+        image_file = io.BytesIO(data_utils.random_bytes())
         image = self.create_image(container_format='bare',
                                   disk_format='raw',
                                   is_public=False,
@@ -133,12 +134,6 @@
     def setup_clients(cls):
         super(BaseV2ImageTest, cls).setup_clients()
         cls.client = cls.os_primary.image_client_v2
-        cls.namespaces_client = cls.os_primary.namespaces_client
-        cls.resource_types_client = cls.os_primary.resource_types_client
-        cls.namespace_properties_client =\
-            cls.os_primary.namespace_properties_client
-        cls.namespace_objects_client = cls.os_primary.namespace_objects_client
-        cls.namespace_tags_client = cls.os_primary.namespace_tags_client
         cls.schemas_client = cls.os_primary.schemas_client
         cls.versions_client = cls.os_primary.image_versions_client
 
@@ -155,6 +150,15 @@
                         namespace_name)
         return namespace
 
+    @classmethod
+    def get_available_stores(cls):
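+        """Return available image stores, or [] if not supported."""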
+        stores = []
+        try:
+            stores = cls.client.info_stores()['stores']
+        except exceptions.NotFound:
+            pass
+        return stores
+
 
 class BaseV2MemberImageTest(BaseV2ImageTest):
 
@@ -194,3 +198,9 @@
     def setup_clients(cls):
         super(BaseV2ImageAdminTest, cls).setup_clients()
         cls.admin_client = cls.os_admin.image_client_v2
+        cls.namespaces_client = cls.os_admin.namespaces_client
+        cls.resource_types_client = cls.os_admin.resource_types_client
+        cls.namespace_properties_client =\
+            cls.os_admin.namespace_properties_client
+        cls.namespace_objects_client = cls.os_admin.namespace_objects_client
+        cls.namespace_tags_client = cls.os_admin.namespace_tags_client
diff --git a/tempest/api/image/v1/test_image_members.py b/tempest/api/image/v1/test_image_members.py
index bf2e510..5e2c8af 100644
--- a/tempest/api/image/v1/test_image_members.py
+++ b/tempest/api/image/v1/test_image_members.py
@@ -19,9 +19,11 @@
 
 
 class ImageMembersTest(base.BaseV1ImageMembersTest):
+    """Test image members"""
 
     @decorators.idempotent_id('1d6ef640-3a20-4c84-8710-d95828fdb6ad')
     def test_add_image_member(self):
+        """Test adding member for image"""
         image = self._create_image()
         self.image_member_client.create_image_member(image, self.alt_tenant_id)
         body = self.image_member_client.list_image_members(image)
@@ -33,6 +35,7 @@
 
     @decorators.idempotent_id('6a5328a5-80e8-4b82-bd32-6c061f128da9')
     def test_get_shared_images(self):
+        """Test getting shared images"""
         image = self._create_image()
         self.image_member_client.create_image_member(image, self.alt_tenant_id)
         share_image = self._create_image()
@@ -47,6 +50,7 @@
 
     @decorators.idempotent_id('a76a3191-8948-4b44-a9d6-4053e5f2b138')
     def test_remove_member(self):
+        """Test removing member from image"""
         image_id = self._create_image()
         self.image_member_client.create_image_member(image_id,
                                                      self.alt_tenant_id)
diff --git a/tempest/api/image/v1/test_image_members_negative.py b/tempest/api/image/v1/test_image_members_negative.py
index 2748bd5..4e3c27c 100644
--- a/tempest/api/image/v1/test_image_members_negative.py
+++ b/tempest/api/image/v1/test_image_members_negative.py
@@ -19,11 +19,12 @@
 
 
 class ImageMembersNegativeTest(base.BaseV1ImageMembersTest):
+    """Negative tests of image members"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('147a9536-18e3-45da-91ea-b037a028f364')
     def test_add_member_with_non_existing_image(self):
-        # Add member with non existing image.
+        """Add member with non existing image"""
         non_exist_image = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.image_member_client.create_image_member,
@@ -32,7 +33,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e1559f05-b667-4f1b-a7af-518b52dc0c0f')
     def test_delete_member_with_non_existing_image(self):
-        # Delete member with non existing image.
+        """Delete member with non existing image"""
         non_exist_image = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.image_member_client.delete_image_member,
@@ -41,7 +42,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f5720333-dd69-4194-bb76-d2f048addd56')
     def test_delete_member_with_non_existing_tenant(self):
-        # Delete member with non existing tenant.
+        """Delete member from image with non existing tenant"""
         image_id = self._create_image()
         non_exist_tenant = data_utils.rand_uuid_hex()
         self.assertRaises(lib_exc.NotFound,
@@ -51,7 +52,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f25f89e4-0b6c-453b-a853-1f80b9d7ef26')
     def test_get_image_without_membership(self):
-        # Image is hidden from another tenants.
+        """Get image without membership
+
+        The image is hidden from other tenants.
+        """
         image_id = self._create_image()
         self.assertRaises(lib_exc.NotFound,
                           self.alt_img_cli.show_image,
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 2432c8b..6fd6c4e 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.api.image import base
 from tempest.common import image as common_image
@@ -57,7 +57,7 @@
 
     @decorators.idempotent_id('3027f8e6-3492-4a11-8575-c3293017af4d')
     def test_register_then_upload(self):
-        # Register, then upload an image
+        """Register, then upload an image"""
         properties = {'prop1': 'val1'}
         container_format, disk_format = get_container_and_disk_format()
         image = self.create_image(name='New Name',
@@ -72,14 +72,14 @@
             self.assertEqual(val, image.get('properties')[key])
 
         # Now try uploading an image file
-        image_file = six.BytesIO(data_utils.random_bytes())
+        image_file = io.BytesIO(data_utils.random_bytes())
         body = self.client.update_image(image['id'], data=image_file)['image']
         self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
 
     @decorators.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
     def test_register_remote_image(self):
-        # Register a new remote image
+        """Register a new remote image"""
         container_format, disk_format = get_container_and_disk_format()
         body = self.create_image(name='New Remote Image',
                                  container_format=container_format,
@@ -96,6 +96,7 @@
 
     @decorators.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
     def test_register_http_image(self):
+        """Register a new image from an http image path url"""
         container_format, disk_format = get_container_and_disk_format()
         image = self.create_image(name='New Http Image',
                                   container_format=container_format,
@@ -108,7 +109,7 @@
 
     @decorators.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
     def test_register_image_with_min_ram(self):
-        # Register an image with min ram
+        """Register an image with min ram"""
         container_format, disk_format = get_container_and_disk_format()
         properties = {'prop1': 'val1'}
         body = self.create_image(name='New_image_with_min_ram',
@@ -203,7 +204,7 @@
         Note that the size of the new image is a random number between
         1024 and 4096
         """
-        image_file = six.BytesIO(data_utils.random_bytes(size))
+        image_file = io.BytesIO(data_utils.random_bytes(size))
         name = 'New Standard Image %s' % name
         image = cls.create_image(name=name,
                                  container_format=container_format,
@@ -213,7 +214,7 @@
 
     @decorators.idempotent_id('246178ab-3b33-4212-9a4b-a7fe8261794d')
     def test_index_no_params(self):
-        # Simple test to see all fixture images returned
+        """Simple test to see all fixture images returned"""
         images_list = self.client.list_images()['images']
         image_list = [image['id'] for image in images_list]
         for image_id in self.created_images:
@@ -221,6 +222,7 @@
 
     @decorators.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
     def test_index_disk_format(self):
+        """Test listing images by disk format"""
         images_list = self.client.list_images(
             disk_format=self.disk_format_alt)['images']
         for image in images_list:
@@ -232,6 +234,7 @@
 
     @decorators.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
     def test_index_container_format(self):
+        """Test listing images by container format"""
         images_list = self.client.list_images(
             container_format=self.container_format)['images']
         for image in images_list:
@@ -243,6 +246,7 @@
 
     @decorators.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
     def test_index_max_size(self):
+        """Test listing images by max size"""
         images_list = self.client.list_images(size_max=42)['images']
         for image in images_list:
             self.assertLessEqual(image['size'], 42)
@@ -252,6 +256,7 @@
 
     @decorators.idempotent_id('6ffc16d0-4cbf-4401-95c8-4ac63eac34d8')
     def test_index_min_size(self):
+        """Test listing images by min size"""
         images_list = self.client.list_images(size_min=142)['images']
         for image in images_list:
             self.assertGreaterEqual(image['size'], 142)
@@ -261,6 +266,7 @@
 
     @decorators.idempotent_id('e5dc26d9-9aa2-48dd-bda5-748e1445da98')
     def test_index_status_active_detail(self):
+        """Test listing active images sorting by size in descending order"""
         images_list = self.client.list_images(detail=True,
                                               status='active',
                                               sort_key='size',
@@ -274,6 +280,7 @@
 
     @decorators.idempotent_id('097af10a-bae8-4342-bff4-edf89969ed2a')
     def test_index_name(self):
+        """Test listing images by its name"""
         images_list = self.client.list_images(
             detail=True,
             name='New Remote Image dup')['images']
@@ -285,6 +292,8 @@
 
 
 class UpdateImageMetaTest(base.BaseV1ImageTest):
+    """Test image metadata"""
+
     @classmethod
     def resource_setup(cls):
         super(UpdateImageMetaTest, cls).resource_setup()
@@ -297,7 +306,7 @@
                                disk_format, size):
         """Create a new standard image and return newly-registered image-id"""
 
-        image_file = six.BytesIO(data_utils.random_bytes(size))
+        image_file = io.BytesIO(data_utils.random_bytes(size))
         name = 'New Standard Image %s' % name
         image = cls.create_image(name=name,
                                  container_format=container_format,
@@ -308,6 +317,7 @@
 
     @decorators.idempotent_id('01752c1c-0275-4de3-9e5b-876e44541928')
     def test_list_image_metadata(self):
+        """Test listing image metadata"""
         # All metadata key/value pairs for an image should be returned
         resp = self.client.check_image(self.image_id)
         resp_metadata = common_image.get_image_meta_from_headers(resp)
@@ -316,6 +326,7 @@
 
     @decorators.idempotent_id('d6d7649c-08ce-440d-9ea7-e3dda552f33c')
     def test_update_image_metadata(self):
+        """Test updating image metadata"""
         # The metadata for the image should match the updated values
         req_metadata = {'key1': 'alt1', 'key2': 'value2'}
         resp = self.client.check_image(self.image_id)
diff --git a/tempest/api/image/v1/test_images_negative.py b/tempest/api/image/v1/test_images_negative.py
index 690b8da..2af1288 100644
--- a/tempest/api/image/v1/test_images_negative.py
+++ b/tempest/api/image/v1/test_images_negative.py
@@ -26,7 +26,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('036ede36-6160-4463-8c01-c781eee6369d')
     def test_register_with_invalid_container_format(self):
-        # Negative tests for invalid data supplied to POST /images
+        """Create image with invalid container format
+
+        Negative tests for invalid data supplied to POST /images
+        """
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
                           headers={'x-image-meta-name': 'test',
                                    'x-image-meta-container_format': 'wrong',
@@ -35,6 +38,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('993face5-921d-4e84-aabf-c1bba4234a67')
     def test_register_with_invalid_disk_format(self):
+        """Create image with invalid disk format"""
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
                           headers={'x-image-meta-name': 'test',
                                    'x-image-meta-container_format': 'bare',
@@ -43,7 +47,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ec652588-7e3c-4b67-a2f2-0fa96f57c8fc')
     def test_delete_non_existent_image(self):
-        # Return an error while trying to delete a non-existent image
+        """Return an error while trying to delete a non-existent image"""
 
         non_existent_image_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
@@ -52,13 +56,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('04f72aa3-fcec-45a3-81a3-308ef7cc82bc')
     def test_delete_image_blank_id(self):
-        # Return an error while trying to delete an image with blank Id
+        """Return an error while trying to delete an image with blank Id"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_image, '')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('950e5054-a3c7-4dee-ada5-e576f1087abd')
     def test_delete_image_non_hex_string_id(self):
-        # Return an error while trying to delete an image with non hex id
+        """Return an error while trying to delete an image with non hex id"""
         invalid_image_id = data_utils.rand_uuid()[:-1] + "j"
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                           invalid_image_id)
@@ -66,13 +70,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4ed757cd-450c-44b1-9fd1-c819748c650d')
     def test_delete_image_negative_image_id(self):
-        # Return an error while trying to delete an image with negative id
+        """Return an error while trying to delete an image with negative id"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_image, -1)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a4a448ab-3db2-4d2d-b9b2-6a1271241dfe')
     def test_delete_image_id_over_character_limit(self):
-        # Return an error while trying to delete image with id over limit
+        """Return an error while trying to delete image with id over limit"""
         overlimit_image_id = data_utils.rand_uuid() + "1"
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                           overlimit_image_id)
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index dbb8c58..733c778 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -13,16 +13,24 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import io
+
 from tempest.api.image import base
+from tempest.common import waiters
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
+    """"Test image operations about image owner"""
 
     @decorators.related_bug('1420008')
     @decorators.idempotent_id('646a6eaa-135f-4493-a0af-12583021224e')
     def test_create_image_owner_param(self):
+        """Test creating image with specified owner"""
         # NOTE: Create image with owner different from tenant owner by
         # using "owner" parameter requires an admin privileges.
         random_id = data_utils.rand_uuid_hex()
@@ -35,6 +43,7 @@
     @decorators.related_bug('1420008')
     @decorators.idempotent_id('525ba546-10ef-4aad-bba1-1858095ce553')
     def test_update_image_owner_param(self):
+        """Test updating image owner"""
         random_id_1 = data_utils.rand_uuid_hex()
         image = self.admin_client.create_image(
             container_format='bare', disk_format='raw', owner=random_id_1)
@@ -49,3 +58,65 @@
         self.assertEqual(random_id_2, updated_image_info['owner'])
         self.assertNotEqual(created_image_info['owner'],
                             updated_image_info['owner'])
+
+
+class ImportCopyImagesTest(base.BaseV2ImageAdminTest):
+    """Test the import copy-image operations"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(ImportCopyImagesTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @decorators.idempotent_id('9b3b644e-03d1-11eb-a036-fa163e2eaf49')
+    def test_image_copy_image_import(self):
+        """Test 'copy-image' import functionalities
+
+        Create image, import image with copy-image method and
+        verify that import succeeded.
+        """
+        available_stores = self.get_available_stores()
+        available_import_methods = self.client.info_import()[
+            'import-methods']['value']
+        # NOTE(gmann): Skip if copy-image import method and multistore
+        # are not available.
+        if ('copy-image' not in available_import_methods or
+            not available_stores):
+            raise self.skipException('Either copy-image import method or '
+                                     'multistore is not available')
+        uuid = data_utils.rand_uuid()
+        image_name = data_utils.rand_name('copy-image')
+        container_format = CONF.image.container_formats[0]
+        disk_format = CONF.image.disk_formats[0]
+        image = self.create_image(name=image_name,
+                                  container_format=container_format,
+                                  disk_format=disk_format,
+                                  visibility='private',
+                                  ramdisk_id=uuid)
+        self.assertEqual('queued', image['status'])
+
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.store_image_file(image['id'], image_file)
+
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual(len(file_content), body.get('size'))
+        self.assertEqual('active', body['status'])
+
+        # Copy the image to all stores. For an all_stores request,
+        # glance will skip the stores where the image is already available.
+        self.admin_client.image_import(image['id'], method='copy-image',
+                                       all_stores=True,
+                                       all_stores_must_succeed=False)
+
+        # Wait for the copy to finish on all stores.
+        failed_stores = waiters.wait_for_image_copied_to_stores(
+            self.client, image['id'])
+        # Assert that the copy did not fail on any store.
+        self.assertEqual(0, len(failed_stores),
+                         "Failed to copy to the following stores: %s" %
+                         str(failed_stores))
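
For reference, the copy-image flow exercised by ImportCopyImagesTest boils down to: make a queued image active by uploading data, fan it out with the 'copy-image' import method, and wait for the per-store results. A minimal sketch, assuming Tempest image clients configured as in the test above (the helper name copy_image_everywhere is illustrative):

import io

from tempest.common import waiters
from tempest.lib.common.utils import data_utils


def copy_image_everywhere(client, admin_client, image_id):
    """Sketch: upload data for a queued image, then fan it out to all stores."""
    # Upload image data so the image becomes 'active' in its initial store.
    client.store_image_file(image_id, io.BytesIO(data_utils.random_bytes()))

    # Ask glance to copy the image to every configured store; stores that
    # already hold the image are skipped, and individual failures do not
    # abort the whole request.
    admin_client.image_import(image_id, method='copy-image',
                              all_stores=True,
                              all_stores_must_succeed=False)

    # The waiter returns the stores (if any) on which the copy failed.
    return waiters.wait_for_image_copied_to_stores(client, image_id)
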
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_objects.py b/tempest/api/image/v2/admin/test_images_metadefs_namespace_objects.py
similarity index 92%
rename from tempest/api/image/v2/test_images_metadefs_namespace_objects.py
rename to tempest/api/image/v2/admin/test_images_metadefs_namespace_objects.py
index 80f8112..9222920 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_objects.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespace_objects.py
@@ -16,7 +16,7 @@
 from tempest.lib import decorators
 
 
-class MetadataNamespaceObjectsTest(base.BaseV2ImageTest):
+class MetadataNamespaceObjectsTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition namespace objects basic functionality"""
 
     def _create_namespace_object(self, namespace):
@@ -30,6 +30,7 @@
 
     @decorators.idempotent_id('b1a3775e-3b5c-4f6a-a3b4-1ba3574ae718')
     def test_create_update_delete_meta_namespace_objects(self):
+        """Test creating/updating/deleting image metadata namespace objects"""
         # Create a namespace
         namespace = self.create_namespace()
         # Create a namespace object
@@ -52,6 +53,7 @@
 
     @decorators.idempotent_id('a2a3615e-3b5c-3f6a-a2b1-1ba3574ae738')
     def test_list_meta_namespace_objects(self):
+        """Test listing image metadata namespace objects"""
         # Create a namespace object
         namespace = self.create_namespace()
         meta_namespace_object = self._create_namespace_object(namespace)
@@ -64,6 +66,7 @@
 
     @decorators.idempotent_id('b1a3674e-3b4c-3f6a-a3b4-1ba3573ca768')
     def test_show_meta_namespace_objects(self):
+        """Test showing image metadata namespace object"""
         # Create a namespace object
         namespace = self.create_namespace()
         namespace_object = self._create_namespace_object(namespace)
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_properties.py b/tempest/api/image/v2/admin/test_images_metadefs_namespace_properties.py
similarity index 94%
rename from tempest/api/image/v2/test_images_metadefs_namespace_properties.py
rename to tempest/api/image/v2/admin/test_images_metadefs_namespace_properties.py
index ed91726..10dfba1 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_properties.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespace_properties.py
@@ -15,11 +15,12 @@
 from tempest.lib import decorators
 
 
-class MetadataNamespacePropertiesTest(base.BaseV2ImageTest):
+class MetadataNamespacePropertiesTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition namespace property basic functionality"""
 
     @decorators.idempotent_id('b1a3765e-3a5d-4f6d-a3a7-3ca3476ae768')
     def test_basic_meta_def_namespace_property(self):
+        """Test operations of image metadata definition namespace property"""
         # Get the available resource types and use one resource_type
         body = self.resource_types_client.list_resource_types()
         resource_name = body['resource_types'][0]['name']
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_tags.py b/tempest/api/image/v2/admin/test_images_metadefs_namespace_tags.py
similarity index 94%
rename from tempest/api/image/v2/test_images_metadefs_namespace_tags.py
rename to tempest/api/image/v2/admin/test_images_metadefs_namespace_tags.py
index 482e808..9e88e03 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_tags.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespace_tags.py
@@ -16,7 +16,7 @@
 from tempest.lib import decorators
 
 
-class MetadataNamespaceTagsTest(base.BaseV2ImageTest):
+class MetadataNamespaceTagsTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition namespace tags basic functionality"""
 
     tags = [
@@ -43,6 +43,7 @@
 
     @decorators.idempotent_id('a2a3765e-3a6d-4f6d-a3a7-3cc3476aa876')
     def test_create_list_delete_namespace_tags(self):
+        """Test creating/listing/deleting image metadata namespace tags"""
         # Create a namespace
         namespace = self.create_namespace()
         self._create_namespace_tags(namespace)
@@ -62,6 +63,7 @@
 
     @decorators.idempotent_id('a2a3765e-1a2c-3f6d-a3a7-3cc3466ab875')
     def test_create_update_delete_tag(self):
+        """Test creating/updating/deleting image metadata namespace tag"""
         # Create a namespace
         namespace = self.create_namespace()
         self._create_namespace_tags(namespace)
diff --git a/tempest/api/image/v2/test_images_metadefs_namespaces.py b/tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
similarity index 96%
rename from tempest/api/image/v2/test_images_metadefs_namespaces.py
rename to tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
index f71b16c..0fe49f9 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespaces.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
@@ -20,11 +20,12 @@
 from tempest.lib import exceptions as lib_exc
 
 
-class MetadataNamespacesTest(base.BaseV2ImageTest):
+class MetadataNamespacesTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition Namespaces basic functionality"""
 
     @decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
     def test_basic_metadata_definition_namespaces(self):
+        """Test operations of image metadata definition namespaces"""
         # get the available resource types and use one resource_type
         body = self.resource_types_client.list_resource_types()
         resource_name = body['resource_types'][0]['name']
diff --git a/tempest/api/image/v2/test_images_metadefs_resource_types.py b/tempest/api/image/v2/admin/test_images_metadefs_resource_types.py
similarity index 95%
rename from tempest/api/image/v2/test_images_metadefs_resource_types.py
rename to tempest/api/image/v2/admin/test_images_metadefs_resource_types.py
index c60b3f7..e533c43 100644
--- a/tempest/api/image/v2/test_images_metadefs_resource_types.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_resource_types.py
@@ -17,11 +17,12 @@
 from tempest.lib import decorators
 
 
-class MetadataResourceTypesTest(base.BaseV2ImageTest):
+class MetadataResourceTypesTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition resource types basic functionality"""
 
     @decorators.idempotent_id('6f358a4e-5ef0-11e6-a795-080027d0d606')
     def test_basic_meta_def_resource_type_association(self):
+        """Test image resource type associations"""
         # Get the available resource types and use one resource_type
         body = self.resource_types_client.list_resource_types()
         resource_name = body['resource_types'][0]['name']
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 5a27a43..d283ab3 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -14,12 +14,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import io
 import random
 
-import six
-
 from oslo_log import log as logging
 from tempest.api.image import base
+from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
@@ -29,6 +29,242 @@
 LOG = logging.getLogger(__name__)
 
 
+class ImportImagesTest(base.BaseV2ImageTest):
+    """Here we test the import operations for image"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(ImportImagesTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(ImportImagesTest, cls).resource_setup()
+        cls.available_import_methods = cls.client.info_import()[
+            'import-methods']['value']
+        if not cls.available_import_methods:
+            raise cls.skipException('Server does not support '
+                                    'any import method')
+
+    def _create_image(self):
+        # Create image
+        uuid = '00000000-1111-2222-3333-444455556666'
+        image_name = data_utils.rand_name('image')
+        container_format = CONF.image.container_formats[0]
+        disk_format = CONF.image.disk_formats[0]
+        image = self.create_image(name=image_name,
+                                  container_format=container_format,
+                                  disk_format=disk_format,
+                                  visibility='private',
+                                  ramdisk_id=uuid)
+        self.assertIn('name', image)
+        self.assertEqual(image_name, image['name'])
+        self.assertIn('visibility', image)
+        self.assertEqual('private', image['visibility'])
+        self.assertIn('status', image)
+        self.assertEqual('queued', image['status'])
+        return image
+
+    def _require_import_method(self, method):
+        if method not in self.available_import_methods:
+            raise self.skipException('Server does not support '
+                                     '%s import method' % method)
+
+    def _stage_and_check(self):
+        image = self._create_image()
+        # Stage image data
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.stage_image_file(image['id'], image_file)
+        # Check image status is 'uploading'
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual('uploading', body['status'])
+        return image['id']
+
+    @decorators.idempotent_id('32ca0c20-e16f-44ac-8590-07869c9b4cc2')
+    def test_image_glance_direct_import(self):
+        """Test 'glance-direct' import functionalities
+
+        Create image, stage image data, import image and verify
+        that import succeeded.
+        """
+        self._require_import_method('glance-direct')
+
+        image_id = self._stage_and_check()
+        # import image from staging to backend
+        resp = self.client.image_import(image_id, method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.client, image_id)
+
+        if not self.versions_client.has_version('2.12'):
+            # API is not new enough to support image/tasks API
+            LOG.info('Glance does not support v2.12, so I am unable to '
+                     'validate the image/tasks API.')
+            return
+
+        tasks = waiters.wait_for_image_tasks_status(
+            self.client, image_id, 'success')
+        self.assertEqual(1, len(tasks))
+        task = tasks[0]
+        self.assertEqual(resp.response['x-openstack-request-id'],
+                         task['request_id'])
+        self.assertEqual('glance-direct',
+                         task['input']['import_req']['method']['name'])
+
+    @decorators.idempotent_id('f6feb7a4-b04f-4706-a011-206129f83e62')
+    def test_image_web_download_import(self):
+        """Test 'web-download' import functionalities
+
+        Create image, import image and verify that import
+        succeeded.
+        """
+        self._require_import_method('web-download')
+
+        image = self._create_image()
+        # Now try to get image details
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual('queued', body['status'])
+        # import image from web to backend
+        image_uri = CONF.image.http_image
+        self.client.image_import(image['id'], method='web-download',
+                                 image_uri=image_uri)
+        waiters.wait_for_image_imported_to_stores(self.client, image['id'])
+
+    @decorators.idempotent_id('e04761a1-22af-42c2-b8bc-a34a3f12b585')
+    def test_remote_import(self):
+        """Test image import against a different worker than stage.
+
+        This creates and stages an image against the primary API worker,
+        but then calls import on a secondary worker (if available) to
+        test that distributed image import works (i.e. proxies the import
+        request to the proper worker).
+        """
+        self._require_import_method('glance-direct')
+
+        if not CONF.image.alternate_image_endpoint:
+            raise self.skipException('No image_remote service to test '
+                                     'against')
+
+        image_id = self._stage_and_check()
+        # import image from staging to backend, but on the alternate worker
+        self.os_primary.image_client_remote.image_import(
+            image_id, method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.client, image_id)
+
+    @decorators.idempotent_id('44d60544-1524-42f7-8899-315301105dd8')
+    def test_remote_delete(self):
+        """Test image delete against a different worker than stage.
+
+        This creates and stages an image against the primary API worker,
+        but then calls delete on a secondary worker (if available) to
+        test that distributed image import works (i.e. proxies the delete
+        request to the proper worker).
+        """
+        self._require_import_method('glance-direct')
+
+        if not CONF.image.alternate_image_endpoint:
+            raise self.skipException('No image_remote service to test '
+                                     'against')
+
+        image_id = self._stage_and_check()
+        # delete image from staging to backend, but on the alternate worker
+        self.os_primary.image_client_remote.delete_image(image_id)
+        self.client.wait_for_resource_deletion(image_id)
+
+
+class MultiStoresImportImagesTest(base.BaseV2ImageTest):
+    """Test importing image in multiple stores"""
+    @classmethod
+    def skip_checks(cls):
+        super(MultiStoresImportImagesTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(MultiStoresImportImagesTest, cls).resource_setup()
+        cls.available_import_methods = cls.client.info_import()[
+            'import-methods']['value']
+        if not cls.available_import_methods:
+            raise cls.skipException('Server does not support '
+                                    'any import method')
+
+        # NOTE(pdeore): Skip if glance-direct import method and multistore
+        # are not enabled/configured, or only one store is configured in
+        # multiple stores setup.
+        cls.available_stores = cls.get_available_stores()
+        if ('glance-direct' not in cls.available_import_methods or
+                not len(cls.available_stores) > 1):
+            raise cls.skipException(
+                'Either glance-direct import method is not present in %s '
+                'or fewer than two stores are '
+                'configured %s' % (cls.available_import_methods,
+                                   cls.available_stores))
+
+    def _create_and_stage_image(self, all_stores=False):
+        """Create Image & stage image file for glance-direct import method."""
+        image_name = data_utils.rand_name('test-image')
+        container_format = CONF.image.container_formats[0]
+        disk_format = CONF.image.disk_formats[0]
+        image = self.create_image(name=image_name,
+                                  container_format=container_format,
+                                  disk_format=disk_format,
+                                  visibility='private')
+        self.assertEqual('queued', image['status'])
+
+        self.client.stage_image_file(
+            image['id'],
+            io.BytesIO(data_utils.random_bytes()))
+        # Check image status is 'uploading'
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual('uploading', body['status'])
+
+        if all_stores:
+            stores_list = ','.join([store['id']
+                                    for store in self.available_stores])
+        else:
+            stores = [store['id'] for store in self.available_stores]
+            stores_list = stores[::len(stores) - 1]
+
+        return body, stores_list
+
+    @decorators.idempotent_id('bf04ff00-3182-47cb-833a-f1c6767b47fd')
+    def test_glance_direct_import_image_to_all_stores(self):
+        """Test image is imported in all available stores
+
+        Create image, import image to all available stores using glance-direct
+        import method and verify that import succeeded.
+        """
+        image, stores = self._create_and_stage_image(all_stores=True)
+
+        self.client.image_import(
+            image['id'], method='glance-direct', all_stores=True)
+
+        waiters.wait_for_image_imported_to_stores(self.client,
+                                                  image['id'], stores)
+
+    @decorators.idempotent_id('82fb131a-dd2b-11ea-aec7-340286b6c574')
+    def test_glance_direct_import_image_to_specific_stores(self):
+        """Test image is imported in all available stores
+
+        Create image, import image to specified store(s) using glance-direct
+        import method and verify that import succeeded.
+        """
+        image, stores = self._create_and_stage_image()
+        self.client.image_import(image['id'], method='glance-direct',
+                                 stores=stores)
+
+        waiters.wait_for_image_imported_to_stores(self.client, image['id'],
+                                                  (','.join(stores)))
+
+
 class BasicOperationsImagesTest(base.BaseV2ImageTest):
     """Here we test the basic operations of images"""
 
@@ -66,7 +302,7 @@
 
         # Now try uploading an image file
         file_content = data_utils.random_bytes()
-        image_file = six.BytesIO(file_content)
+        image_file = io.BytesIO(file_content)
         self.client.store_image_file(image['id'], image_file)
 
         # Now try to get image details
@@ -88,8 +324,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
     def test_delete_image(self):
-        # Deletes an image by image_id
-
+        """Test deleting an image by image_id"""
         # Create image
         image_name = data_utils.rand_name('image')
         container_format = CONF.image.container_formats[0]
@@ -110,8 +345,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
     def test_update_image(self):
-        # Updates an image by image_id
-
+        """Test updating an image by image_id"""
         # Create image
         image_name = data_utils.rand_name('image')
         container_format = CONF.image.container_formats[0]
@@ -135,6 +369,7 @@
 
     @decorators.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
     def test_deactivate_reactivate_image(self):
+        """Test deactivating and reactivating an image"""
         # Create image
         image_name = data_utils.rand_name('image')
         image = self.create_image(name=image_name,
@@ -144,7 +379,7 @@
 
         # Upload an image file
         content = data_utils.random_bytes()
-        image_file = six.BytesIO(content)
+        image_file = io.BytesIO(content)
         self.client.store_image_file(image['id'], image_file)
 
         # Deactivate image
@@ -194,7 +429,7 @@
         1024 and 4096
         """
         size = random.randint(1024, 4096)
-        image_file = six.BytesIO(data_utils.random_bytes(size))
+        image_file = io.BytesIO(data_utils.random_bytes(size))
         tags = [data_utils.rand_name('tag'), data_utils.rand_name('tag')]
         image = cls.create_image(container_format=container_format,
                                  disk_format=disk_format,
@@ -229,13 +464,14 @@
         # Validate that the list was fetched sorted accordingly
         msg = 'No images were found that met the filter criteria.'
         self.assertNotEmpty(images_list, msg)
-        sorted_list = [image['size'] for image in images_list]
+        sorted_list = [image['size'] for image in images_list
+                       if image['size'] is not None]
         msg = 'The list of images was not sorted correctly.'
         self.assertEqual(sorted(sorted_list, reverse=desc), sorted_list, msg)
 
     @decorators.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
     def test_list_no_params(self):
-        # Simple test to see all fixture images returned
+        """Simple test to see all fixture images returned"""
         images_list = self.client.list_images()['images']
         image_list = [image['id'] for image in images_list]
 
@@ -244,25 +480,25 @@
 
     @decorators.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
     def test_list_images_param_container_format(self):
-        # Test to get all images with a specific container_format
+        """Test to get all images with a specific container_format"""
         params = {"container_format": self.test_data['container_format']}
         self._list_by_param_value_and_assert(params)
 
     @decorators.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
     def test_list_images_param_disk_format(self):
-        # Test to get all images with disk_format = raw
+        """Test to get all images with disk_format = raw"""
         params = {"disk_format": "raw"}
         self._list_by_param_value_and_assert(params)
 
     @decorators.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
     def test_list_images_param_visibility(self):
-        # Test to get all images with visibility = private
+        """Test to get all images with visibility = private"""
         params = {"visibility": "private"}
         self._list_by_param_value_and_assert(params)
 
     @decorators.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
     def test_list_images_param_size(self):
-        # Test to get all images by size
+        """Test to get all images by size"""
         image_id = self.created_images[0]
         # Get image metadata
         image = self.client.show_image(image_id)
@@ -272,7 +508,7 @@
 
     @decorators.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
     def test_list_images_param_min_max_size(self):
-        # Test to get all images with size between 2000 to 3000
+        """Test to get all images with min size and max size"""
         image_id = self.created_images[0]
         # Get image metadata
         image = self.client.show_image(image_id)
@@ -290,13 +526,13 @@
 
     @decorators.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
     def test_list_images_param_status(self):
-        # Test to get all active images
+        """Test to get all active images"""
         params = {"status": "active"}
         self._list_by_param_value_and_assert(params)
 
     @decorators.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
     def test_list_images_param_limit(self):
-        # Test to get images by limit
+        """Test to get images by limit"""
         params = {"limit": 1}
         images_list = self.client.list_images(params=params)['images']
 
@@ -305,7 +541,7 @@
 
     @decorators.idempotent_id('e9a44b91-31c8-4b40-a332-e0a39ffb4dbb')
     def test_list_image_param_owner(self):
-        # Test to get images by owner
+        """Test to get images by owner"""
         image_id = self.created_images[0]
         # Get image metadata
         image = self.client.show_image(image_id)
@@ -315,13 +551,13 @@
 
     @decorators.idempotent_id('55c8f5f5-bfed-409d-a6d5-4caeda985d7b')
     def test_list_images_param_name(self):
-        # Test to get images by name
+        """Test to get images by name"""
         params = {'name': self.test_data['name']}
         self._list_by_param_value_and_assert(params)
 
     @decorators.idempotent_id('aa8ac4df-cff9-418b-8d0f-dd9c67b072c9')
     def test_list_images_param_tag(self):
-        # Test to get images matching a tag
+        """Test to get images matching a tag"""
         params = {'tag': self.test_data['tags'][0]}
         images_list = self.client.list_images(params=params)['images']
         # Validating properties of fetched images
@@ -336,24 +572,26 @@
 
     @decorators.idempotent_id('eeadce49-04e0-43b7-aec7-52535d903e7a')
     def test_list_images_param_sort(self):
+        """Test listing images sorting in descending order"""
         params = {'sort': 'size:desc'}
         self._list_sorted_by_image_size_and_assert(params, desc=True)
 
     @decorators.idempotent_id('9faaa0c2-c3a5-43e1-8f61-61c54b409a49')
     def test_list_images_param_sort_key_dir(self):
+        """Test listing images sorting by size in descending order"""
         params = {'sort_key': 'size', 'sort_dir': 'desc'}
         self._list_sorted_by_image_size_and_assert(params, desc=True)
 
     @decorators.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
     def test_get_image_schema(self):
-        # Test to get image schema
+        """Test to get image schema"""
         schema = "image"
         body = self.schemas_client.show_schema(schema)
         self.assertEqual("image", body['name'])
 
     @decorators.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
     def test_get_images_schema(self):
-        # Test to get images schema
+        """Test to get images schema"""
         schema = "images"
         body = self.schemas_client.show_schema(schema)
         self.assertEqual("images", body['name'])
@@ -372,8 +610,9 @@
 
     @decorators.idempotent_id('3fa50be4-8e38-4c02-a8db-7811bb780122')
     def test_list_images_param_member_status(self):
+        """Test listing images by member_status and visibility"""
         # Create an image to be shared using default visibility
-        image_file = six.BytesIO(data_utils.random_bytes(2048))
+        image_file = io.BytesIO(data_utils.random_bytes(2048))
         container_format = CONF.image.container_formats[0]
         disk_format = CONF.image.disk_formats[0]
         image = self.create_image(container_format=container_format,
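
The glance-direct path added above follows a fixed sequence: stage raw bytes for a queued image, confirm it reports 'uploading', trigger the import, then wait until the image is reported as imported to its store(s). A minimal sketch under those assumptions, using the same client calls as the tests (the helper name glance_direct_import is illustrative, not a Tempest API):

import io

from tempest.common import waiters
from tempest.lib.common.utils import data_utils


def glance_direct_import(client, image_id):
    """Sketch of the glance-direct flow; the image is assumed 'queued'."""
    # Stage the raw bytes; the image should now report 'uploading'.
    client.stage_image_file(image_id,
                            io.BytesIO(data_utils.random_bytes()))
    assert client.show_image(image_id)['status'] == 'uploading'

    # Kick off the import from the staging area to the backend store(s)
    # and wait until glance reports the image as imported.
    client.image_import(image_id, method='glance-direct')
    waiters.wait_for_image_imported_to_stores(client, image_id)
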
diff --git a/tempest/api/image/v2/test_images_member.py b/tempest/api/image/v2/test_images_member.py
index e19d8c8..bc67859 100644
--- a/tempest/api/image/v2/test_images_member.py
+++ b/tempest/api/image/v2/test_images_member.py
@@ -15,9 +15,11 @@
 
 
 class ImagesMemberTest(base.BaseV2MemberImageTest):
+    """Test image members"""
 
     @decorators.idempotent_id('5934c6ea-27dc-4d6e-9421-eeb5e045494a')
     def test_image_share_accept(self):
+        """Test sharing and accepting an image"""
         image_id = self._create_image()
         member = self.image_member_client.create_image_member(
             image_id, member=self.alt_tenant_id)
@@ -41,6 +43,7 @@
 
     @decorators.idempotent_id('d9e83e5f-3524-4b38-a900-22abcb26e90e')
     def test_image_share_reject(self):
+        """Test sharing and rejecting an image"""
         image_id = self._create_image()
         member = self.image_member_client.create_image_member(
             image_id, member=self.alt_tenant_id)
@@ -57,6 +60,7 @@
 
     @decorators.idempotent_id('a6ee18b9-4378-465e-9ad9-9a6de58a3287')
     def test_get_image_member(self):
+        """Test getting image members after the image is accepted"""
         image_id = self._create_image()
         self.image_member_client.create_image_member(
             image_id, member=self.alt_tenant_id)
@@ -75,6 +79,7 @@
 
     @decorators.idempotent_id('72989bc7-2268-48ed-af22-8821e835c914')
     def test_remove_image_member(self):
+        """Test removing image members after the image is accepted"""
         image_id = self._create_image()
         self.image_member_client.create_image_member(
             image_id, member=self.alt_tenant_id)
@@ -89,10 +94,12 @@
 
     @decorators.idempotent_id('634dcc3f-f6e2-4409-b8fd-354a0bb25d83')
     def test_get_image_member_schema(self):
+        """Test getting image member schema"""
         body = self.schemas_client.show_schema("member")
         self.assertEqual("member", body['name'])
 
     @decorators.idempotent_id('6ae916ef-1052-4e11-8d36-b3ae14853cbb')
     def test_get_image_members_schema(self):
+        """Test getting image members schema"""
         body = self.schemas_client.show_schema("members")
         self.assertEqual("members", body['name'])
diff --git a/tempest/api/image/v2/test_images_member_negative.py b/tempest/api/image/v2/test_images_member_negative.py
index caa90f9..5f6f1ae 100644
--- a/tempest/api/image/v2/test_images_member_negative.py
+++ b/tempest/api/image/v2/test_images_member_negative.py
@@ -16,10 +16,12 @@
 
 
 class ImagesMemberNegativeTest(base.BaseV2MemberImageTest):
+    """Negative tests of image members"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b79efb37-820d-4cf0-b54c-308b00cf842c')
     def test_image_share_invalid_status(self):
+        """Test updating image member status to invalid status should fail"""
         image_id = self._create_image()
         member = self.image_member_client.create_image_member(
             image_id, member=self.alt_tenant_id)
@@ -32,6 +34,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('27002f74-109e-4a37-acd0-f91cd4597967')
     def test_image_share_owner_cannot_accept(self):
+        """Test that image owner can't accept image shared to other member"""
         image_id = self._create_image()
         member = self.image_member_client.create_image_member(
             image_id, member=self.alt_tenant_id)
diff --git a/tempest/api/image/v2/test_images_metadefs_schema.py b/tempest/api/image/v2/test_images_metadefs_schema.py
index 95cc310..7dd36d2 100644
--- a/tempest/api/image/v2/test_images_metadefs_schema.py
+++ b/tempest/api/image/v2/test_images_metadefs_schema.py
@@ -18,64 +18,64 @@
 
 
 class MetadataSchemaTest(base.BaseV2ImageTest):
-    """Test to get metadata schema"""
+    """Test to get image metadata schema"""
 
     @decorators.idempotent_id('e9e44891-3cb8-3b40-a532-e0a39fea3dab')
     def test_get_metadata_namespace_schema(self):
-        # Test to get namespace schema
+        """Test to get image namespace schema"""
         body = self.schemas_client.show_schema("metadefs/namespace")
         self.assertEqual("namespace", body['name'])
 
     @decorators.idempotent_id('ffe44891-678b-3ba0-a3e2-e0a3967b3aeb')
     def test_get_metadata_namespaces_schema(self):
-        # Test to get namespaces schema
+        """Test to get image namespaces schema"""
         body = self.schemas_client.show_schema("metadefs/namespaces")
         self.assertEqual("namespaces", body['name'])
 
     @decorators.idempotent_id('fde34891-678b-3b40-ae32-e0a3e67b6beb')
     def test_get_metadata_resource_type_schema(self):
-        # Test to get resource_type schema
+        """Test to get image resource_type schema"""
         body = self.schemas_client.show_schema("metadefs/resource_type")
         self.assertEqual("resource_type_association", body['name'])
 
     @decorators.idempotent_id('dfe4a891-b38b-3bf0-a3b2-e03ee67b3a3a')
     def test_get_metadata_resources_types_schema(self):
-        # Test to get resource_types schema
+        """Test to get image resource_types schema"""
         body = self.schemas_client.show_schema("metadefs/resource_types")
         self.assertEqual("resource_type_associations", body['name'])
 
     @decorators.idempotent_id('dff4a891-b38b-3bf0-a3b2-e03ee67b3a3b')
     def test_get_metadata_object_schema(self):
-        # Test to get object schema
+        """Test to get image object schema"""
         body = self.schemas_client.show_schema("metadefs/object")
         self.assertEqual("object", body['name'])
 
     @decorators.idempotent_id('dee4a891-b38b-3bf0-a3b2-e03ee67b3a3c')
     def test_get_metadata_objects_schema(self):
-        # Test to get objects schema
+        """Test to get image objects schema"""
         body = self.schemas_client.show_schema("metadefs/objects")
         self.assertEqual("objects", body['name'])
 
     @decorators.idempotent_id('dae4a891-b38b-3bf0-a3b2-e03ee67b3a3d')
     def test_get_metadata_property_schema(self):
-        # Test to get property schema
+        """Test to get image property schema"""
         body = self.schemas_client.show_schema("metadefs/property")
         self.assertEqual("property", body['name'])
 
     @decorators.idempotent_id('dce4a891-b38b-3bf0-a3b2-e03ee67b3a3e')
     def test_get_metadata_properties_schema(self):
-        # Test to get properties schema
+        """Test to get image properties schema"""
         body = self.schemas_client.show_schema("metadefs/properties")
         self.assertEqual("properties", body['name'])
 
     @decorators.idempotent_id('dde4a891-b38b-3bf0-a3b2-e03ee67b3a3e')
     def test_get_metadata_tag_schema(self):
-        # Test to get tag schema
+        """Test to get image tag schema"""
         body = self.schemas_client.show_schema("metadefs/tag")
         self.assertEqual("tag", body['name'])
 
     @decorators.idempotent_id('cde4a891-b38b-3bf0-a3b2-e03ee67b3a3a')
     def test_get_metadata_tags_schema(self):
-        # Test to get tags schema
+        """Test to get image tags schema"""
         body = self.schemas_client.show_schema("metadefs/tags")
         self.assertEqual("tags", body['name'])
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index b4baf05..a3802a9 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -14,11 +14,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
 from tempest.api.image import base
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
+CONF = config.CONF
+
 
 class ImagesNegativeTest(base.BaseV2ImageTest):
 
@@ -36,7 +41,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81d9f')
     def test_get_non_existent_image(self):
-        # get the non-existent image
+        """Get the non-existent image"""
         non_existent_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.show_image,
                           non_existent_id)
@@ -44,14 +49,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2562ad')
     def test_get_image_null_id(self):
-        # get image with image_id = NULL
+        """Get image with image_id = NULL"""
         image_id = ""
         self.assertRaises(lib_exc.NotFound, self.client.show_image, image_id)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e57fc127-7ba0-4693-92d7-1d8a05ebcba9')
     def test_get_delete_deleted_image(self):
-        # get and delete the deleted image
+        """Get and delete the deleted image"""
         # create and delete image
         image = self.client.create_image(name='test',
                                          container_format='bare',
@@ -70,7 +75,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6fe40f1c-57bd-4918-89cc-8500f850f3de')
     def test_delete_non_existing_image(self):
-        # delete non-existent image
+        """Delete non-existent image"""
         non_existent_image_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                           non_existent_image_id)
@@ -78,7 +83,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('32248db1-ab88-4821-9604-c7c369f1f88c')
     def test_delete_image_null_id(self):
-        # delete image with image_id=NULL
+        """Delete image with image_id=NULL"""
         image_id = ""
         self.assertRaises(lib_exc.NotFound, self.client.delete_image,
                           image_id)
@@ -86,7 +91,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('292bd310-369b-41c7-a7a3-10276ef76753')
     def test_register_with_invalid_container_format(self):
-        # Negative tests for invalid data supplied to POST /images
+        """Create image with invalid container format
+
+        Negative tests for invalid data supplied to POST /images
+        """
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
                           name='test', container_format='wrong',
                           disk_format='vhd')
@@ -94,6 +102,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
     def test_register_with_invalid_disk_format(self):
+        """Create image with invalid disk format"""
         self.assertRaises(lib_exc.BadRequest, self.client.create_image,
                           name='test', container_format='bare',
                           disk_format='wrong')
@@ -101,7 +110,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ab980a34-8410-40eb-872b-f264752f46e5')
     def test_delete_protected_image(self):
-        # Create a protected image
+        """Create a protected image"""
         image = self.create_image(protected=True)
         self.addCleanup(self.client.update_image, image['id'],
                         [dict(replace="/protected", value=False)])
@@ -110,3 +119,102 @@
         self.assertRaises(lib_exc.Forbidden,
                           self.client.delete_image,
                           image['id'])
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('a0ae75d4-ce9c-4576-b823-aba04c8acabd')
+    def test_update_image_reserved_property(self):
+        """Attempt to add a reserved property to an image.
+
+        Glance bans some internal-use-only properties such that they will
+        cause a PATCH to fail. Since os_glance_* is banned, we can use a
+        key in that namespace here.
+        """
+        if not CONF.image_feature_enabled.os_glance_reserved:
+            raise self.skipException('os_glance_reserved is not enabled')
+
+        image = self.create_image(name='test',
+                                  container_format='bare',
+                                  disk_format='raw')
+        self.assertRaises(lib_exc.Forbidden,
+                          self.client.update_image,
+                          image['id'], [{'add': '/os_glance_foo',
+                                         'value': 'bar'}])
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('e3fb7df8-2742-4143-8976-f1b26870f0a0')
+    def test_create_image_reserved_property(self):
+        """Attempt to create an image with a reserved property.
+
+        Glance bans some internal-use-only properties such that they will
+        cause an image create to fail. Since os_glance_* is banned, we can
+        use a key in that namespace here.
+        """
+        if not CONF.image_feature_enabled.os_glance_reserved:
+            raise self.skipException('os_glance_reserved is not enabled')
+
+        self.assertRaises(lib_exc.Forbidden,
+                          self.create_image,
+                          name='test',
+                          container_format='bare',
+                          disk_format='raw',
+                          os_glance_foo='bar')
+
+
+class ImportImagesNegativeTest(base.BaseV2ImageTest):
+    """Here we test the import operations for image"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(ImportImagesNegativeTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(ImportImagesNegativeTest, cls).resource_setup()
+        cls.available_import_methods = cls.client.info_import()[
+            'import-methods']['value']
+        if not cls.available_import_methods:
+            raise cls.skipException('Server does not support '
+                                    'any import method')
+
+        cls.available_stores = cls.get_available_stores()
+        if not len(cls.available_stores) > 0:
+            raise cls.skipException(
+                'No stores configured %s' % cls.available_stores)
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('c52f6a77-f522-4411-8dbe-9d14f2b1ba6b')
+    def test_image_web_download_import_with_bad_url(self):
+        """Test 'web-download' import functionalities
+
+        Make sure that web-download with invalid URL fails properly.
+        """
+        if 'web-download' not in self.available_import_methods:
+            raise self.skipException('Server does not support '
+                                     'web-download import method')
+        image = self.client.create_image(name='test',
+                                         container_format='bare',
+                                         disk_format='raw')
+        # Now try to get image details
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual('queued', body['status'])
+        stores = self.get_available_stores()
+        # import image from web to backend
+        image_uri = 'http://does-not.exist/no/possible/way'
+        self.client.image_import(image['id'], method='web-download',
+                                 image_uri=image_uri,
+                                 stores=[stores[0]['id']])
+
+        start_time = int(time.time())
+        while int(time.time()) - start_time < self.client.build_timeout:
+            body = self.client.show_image(image['id'])
+            if body.get('os_glance_failed_import'):
+                # Store ended up in failed list, which is good
+                return
+            time.sleep(self.client.build_interval)
+
+        self.fail('Image never reported failed store')
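
The bad-URL web-download case above relies on polling show_image until the 'os_glance_failed_import' property is populated, bounded by the client's build_timeout and build_interval. A minimal sketch of that polling helper (the function name wait_for_failed_import is illustrative, not part of Tempest):

import time


def wait_for_failed_import(client, image_id):
    """Sketch: return True once the image reports a failed import store.

    Returns False if the client's build_timeout elapses first.
    """
    start_time = int(time.time())
    while int(time.time()) - start_time < client.build_timeout:
        body = client.show_image(image_id)
        if body.get('os_glance_failed_import'):
            # The store ended up in the failed list, as expected for an
            # unreachable image_uri.
            return True
        time.sleep(client.build_interval)
    return False
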
diff --git a/tempest/api/image/v2/test_images_tags.py b/tempest/api/image/v2/test_images_tags.py
index 601826e..163063c 100644
--- a/tempest/api/image/v2/test_images_tags.py
+++ b/tempest/api/image/v2/test_images_tags.py
@@ -18,9 +18,11 @@
 
 
 class ImagesTagsTest(base.BaseV2ImageTest):
+    """Test image tags"""
 
     @decorators.idempotent_id('10407036-6059-4f95-a2cd-cbbbee7ed329')
     def test_update_delete_tags_for_image(self):
+        """Test adding and deleting image tags"""
         image = self.create_image(container_format='bare',
                                   disk_format='raw',
                                   visibility='private')
diff --git a/tempest/api/image/v2/test_images_tags_negative.py b/tempest/api/image/v2/test_images_tags_negative.py
index 440fa36..2db4a74 100644
--- a/tempest/api/image/v2/test_images_tags_negative.py
+++ b/tempest/api/image/v2/test_images_tags_negative.py
@@ -19,11 +19,12 @@
 
 
 class ImagesTagsNegativeTest(base.BaseV2ImageTest):
+    """Negative tests of image tags"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8cd30f82-6f9a-4c6e-8034-c1b51fba43d9')
     def test_update_tags_for_non_existing_image(self):
-        # Update tag with non existing image.
+        """Update image tag with non existing image"""
         tag = data_utils.rand_name('tag')
         non_exist_image = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.add_image_tag,
@@ -32,7 +33,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('39c023a2-325a-433a-9eea-649bf1414b19')
     def test_delete_non_existing_tag(self):
-        # Delete non existing tag.
+        """Delete non existing image tag"""
         image = self.create_image(container_format='bare',
                                   disk_format='raw',
                                   visibility='private'
diff --git a/tempest/api/image/v2/test_versions.py b/tempest/api/image/v2/test_versions.py
index 84f1068..ef91354 100644
--- a/tempest/api/image/v2/test_versions.py
+++ b/tempest/api/image/v2/test_versions.py
@@ -17,10 +17,12 @@
 
 
 class VersionsTest(base.BaseV2ImageTest):
+    """Test image versions"""
 
     @decorators.idempotent_id('659ea30a-a17c-4317-832c-0f68ed23c31d')
     @decorators.attr(type='smoke')
     def test_list_versions(self):
+        """Test listing image versions"""
         versions = self.versions_client.list_versions()['versions']
         expected_resources = ('id', 'links', 'status')
 
diff --git a/tempest/api/network/admin/test_dhcp_agent_scheduler.py b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
index 4631ea9..2506185 100644
--- a/tempest/api/network/admin/test_dhcp_agent_scheduler.py
+++ b/tempest/api/network/admin/test_dhcp_agent_scheduler.py
@@ -18,6 +18,7 @@
 
 
 class DHCPAgentSchedulersTestJSON(base.BaseAdminNetworkTest):
+    """Test network DHCP agent scheduler extension"""
 
     @classmethod
     def skip_checks(cls):
@@ -37,11 +38,13 @@
 
     @decorators.idempotent_id('5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d')
     def test_list_dhcp_agent_hosting_network(self):
+        """Test Listing DHCP agents hosting a network"""
         self.admin_networks_client.list_dhcp_agents_on_hosting_network(
             self.network['id'])
 
     @decorators.idempotent_id('30c48f98-e45d-4ffb-841c-b8aad57c7587')
     def test_list_networks_hosted_by_one_dhcp(self):
+        """Test Listing networks hosted by a DHCP agent"""
         body = self.admin_networks_client.list_dhcp_agents_on_hosting_network(
             self.network['id'])
         agents = body['agents']
@@ -61,6 +64,7 @@
 
     @decorators.idempotent_id('a0856713-6549-470c-a656-e97c8df9a14d')
     def test_add_remove_network_from_dhcp_agent(self):
+        """Test adding and removing network from a DHCP agent"""
         # The agent is now bound to the network, we can free the port
         self.ports_client.delete_port(self.port['id'])
         agent = dict()
diff --git a/tempest/api/network/admin/test_external_network_extension.py b/tempest/api/network/admin/test_external_network_extension.py
index 5bd3fce..0cec316 100644
--- a/tempest/api/network/admin/test_external_network_extension.py
+++ b/tempest/api/network/admin/test_external_network_extension.py
@@ -23,6 +23,7 @@
 
 
 class ExternalNetworksTestJSON(base.BaseAdminNetworkTest):
+    """Test external networks"""
 
     @classmethod
     def resource_setup(cls):
@@ -42,8 +43,11 @@
 
     @decorators.idempotent_id('462be770-b310-4df9-9c42-773217e4c8b1')
     def test_create_external_network(self):
-        # Create a network as an admin user specifying the
-        # external network extension attribute
+        """Test creating external network
+
+        Create a network as an admin user specifying the
+        external network extension attribute
+        """
         ext_network = self._create_network()
         # Verifies router:external parameter
         self.assertIsNotNone(ext_network['id'])
@@ -51,8 +55,11 @@
 
     @decorators.idempotent_id('4db5417a-e11c-474d-a361-af00ebef57c5')
     def test_update_external_network(self):
-        # Update a network as an admin user specifying the
-        # external network extension attribute
+        """Test updating external network
+
+        Update a network as an admin user specifying the
+        external network extension attribute
+        """
         network = self._create_network(external=False)
         self.assertFalse(network.get('router:external', False))
         update_body = {'router:external': True}
@@ -64,6 +71,7 @@
 
     @decorators.idempotent_id('39be4c9b-a57e-4ff9-b7c7-b218e209dfcc')
     def test_list_external_networks(self):
+        """Test listing external networks"""
         # Create external_net
         external_network = self._create_network()
         # List networks as a normal user and confirm the external
@@ -81,6 +89,7 @@
 
     @decorators.idempotent_id('2ac50ab2-7ebd-4e27-b3ce-a9e399faaea2')
     def test_show_external_networks_attribute(self):
+        """Test showing external network attribute"""
         # Create external_net
         external_network = self._create_network()
         # Show an external network as a normal user and confirm the
@@ -101,9 +110,11 @@
     @testtools.skipUnless(CONF.network_feature_enabled.floating_ips,
                           'Floating ips are not availabled')
     def test_delete_external_networks_with_floating_ip(self):
-        # Verifies external network can be deleted while still holding
-        # (unassociated) floating IPs
+        """Test deleting external network with unassociated floating ips
 
+        Verifies external network can be deleted while still holding
+        (unassociated) floating IPs
+        """
         body = self.admin_networks_client.create_network(
             **{'router:external': True})
         external_network = body['network']
diff --git a/tempest/api/network/admin/test_external_networks_negative.py b/tempest/api/network/admin/test_external_networks_negative.py
index da32f2d..92731f6 100644
--- a/tempest/api/network/admin/test_external_networks_negative.py
+++ b/tempest/api/network/admin/test_external_networks_negative.py
@@ -25,16 +25,19 @@
 
 
 class ExternalNetworksAdminNegativeTestJSON(base.BaseAdminNetworkTest):
+    """Negative tests of external network"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d402ae6c-0be0-4d8e-833b-a738895d98d0')
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_create_port_with_precreated_floatingip_as_fixed_ip(self):
-        # NOTE: External networks can be used to create both floating-ip as
-        # well as instance-ip. So, creating an instance-ip with a value of a
-        # pre-created floating-ip should be denied.
+        """Test creating port with precreated floating ip as fixed ip
 
+        NOTE: External networks can be used to create both floating-ip as
+        well as instance-ip. So, creating an instance-ip with a value of a
+        pre-created floating-ip should be denied.
+        """
         # create a floating ip
         body = self.admin_floating_ips_client.create_floatingip(
             floating_network_id=CONF.network.public_network_id)
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index adc4dda..a8dae7c 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -23,6 +23,8 @@
 
 
 class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest):
+    """Test floating ips"""
+
     credentials = ['primary', 'alt', 'admin']
 
     @classmethod
@@ -55,6 +57,13 @@
 
     @decorators.idempotent_id('64f2100b-5471-4ded-b46c-ddeeeb4f231b')
     def test_list_floating_ips_from_admin_and_nonadmin(self):
+        """Test listing floating ips from admin and non admin users
+
+        This test performs below operations:
+        1. Create couple floating ips for admin and non-admin users.
+        2. Verify if admin can access all floating ips including other user
+        and non-admin user can only access its own floating ips.
+        """
         # Create floating ip from admin user
         floating_ip_admin = self.admin_floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id)
@@ -90,10 +99,11 @@
 
     @decorators.idempotent_id('32727cc3-abe2-4485-a16e-48f2d54c14f2')
     def test_create_list_show_floating_ip_with_tenant_id_by_admin(self):
+        """Verify if admin can create/list/show floating ip with tenant id"""
         # Creates a floating IP
         body = self.admin_floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
-            tenant_id=self.network['tenant_id'],
+            project_id=self.network['project_id'],
             port_id=self.port['id'])
         created_floating_ip = body['floatingip']
         self.addCleanup(
@@ -101,7 +111,7 @@
             self.floating_ips_client.delete_floatingip,
             created_floating_ip['id'])
         self.assertIsNotNone(created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['tenant_id'])
+        self.assertIsNotNone(created_floating_ip['project_id'])
         self.assertIsNotNone(created_floating_ip['floating_ip_address'])
         self.assertEqual(created_floating_ip['port_id'], self.port['id'])
         self.assertEqual(created_floating_ip['floating_network_id'],
@@ -116,8 +126,8 @@
         self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
         self.assertEqual(shown_floating_ip['floating_network_id'],
                          self.ext_net_id)
-        self.assertEqual(shown_floating_ip['tenant_id'],
-                         self.network['tenant_id'])
+        self.assertEqual(shown_floating_ip['project_id'],
+                         self.network['project_id'])
         self.assertEqual(shown_floating_ip['floating_ip_address'],
                          created_floating_ip['floating_ip_address'])
         self.assertEqual(shown_floating_ip['port_id'], self.port['id'])
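Several hunks in this change switch assertions and request arguments from the
legacy tenant_id key to project_id. Where a test still has to tolerate
responses that only carry the legacy key, a tolerant lookup is one option; a
small sketch under that assumption (the helper name and example values are
made up, not part of tempest):

def project_of(resource):
    # Prefer the current key, fall back to the legacy one if it is all we got.
    return resource.get('project_id', resource.get('tenant_id'))

floating_ip = {'tenant_id': 'abc123'}   # older-style response
network = {'project_id': 'abc123'}      # newer-style response
assert project_of(floating_ip) == project_of(network)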
diff --git a/tempest/api/network/admin/test_metering_extensions.py b/tempest/api/network/admin/test_metering_extensions.py
index 5063fef..a60cd48 100644
--- a/tempest/api/network/admin/test_metering_extensions.py
+++ b/tempest/api/network/admin/test_metering_extensions.py
@@ -92,13 +92,14 @@
 
     @decorators.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
     def test_list_metering_labels(self):
-        # Verify label filtering
+        """Verify listing metering labels"""
         body = self.admin_metering_labels_client.list_metering_labels(id=33)
         metering_labels = body['metering_labels']
         self.assertEmpty(metering_labels)
 
     @decorators.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
     def test_create_delete_metering_label_with_filters(self):
+        """Verifies creating and deleting metering label with filters"""
         # Creates a label
         name = data_utils.rand_name('metering-label-')
         description = "label created by tempest"
@@ -115,19 +116,20 @@
 
     @decorators.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
     def test_show_metering_label(self):
-        # Verifies the details of a label
+        """Verifies the details of a metering label"""
         body = self.admin_metering_labels_client.show_metering_label(
             self.metering_label['id'])
         metering_label = body['metering_label']
         self.assertEqual(self.metering_label['id'], metering_label['id'])
         self.assertEqual(self.metering_label['tenant_id'],
-                         metering_label['tenant_id'])
+                         metering_label['project_id'])
         self.assertEqual(self.metering_label['name'], metering_label['name'])
         self.assertEqual(self.metering_label['description'],
                          metering_label['description'])
 
     @decorators.idempotent_id('cc832399-6681-493b-9d79-0202831a1281')
     def test_list_metering_label_rules(self):
+        """Verifies listing metering label rules"""
         client = self.admin_metering_label_rules_client
         # Verify rule filtering
         body = client.list_metering_label_rules(id=33)
@@ -136,6 +138,7 @@
 
     @decorators.idempotent_id('f4d547cd-3aee-408f-bf36-454f8825e045')
     def test_create_delete_metering_label_rule_with_filters(self):
+        """Verifies creating and deleting metering label rule with filters"""
         # Creates a rule
         remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4
                             else "fd03::/64")
@@ -154,7 +157,7 @@
 
     @decorators.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7')
     def test_show_metering_label_rule(self):
-        # Verifies the details of a rule
+        """Verifies the metering details of a rule"""
         client = self.admin_metering_label_rules_client
         body = (client.show_metering_label_rule(
                 self.metering_label_rule['id']))
diff --git a/tempest/api/network/admin/test_negative_quotas.py b/tempest/api/network/admin/test_negative_quotas.py
index 0db038d..1ce9f47 100644
--- a/tempest/api/network/admin/test_negative_quotas.py
+++ b/tempest/api/network/admin/test_negative_quotas.py
@@ -45,25 +45,32 @@
         super(QuotasNegativeTest, self).setUp()
         name = data_utils.rand_name('test_project_')
         description = data_utils.rand_name('desc_')
-        self.project = identity.identity_utils(self.os_admin).create_project(
+        self.creds_client = identity.identity_utils(self.os_admin)
+        self.project = self.creds_client.create_project(
             name=name, description=description)
         self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
                         self.project['id'])
 
+    def tearDown(self):
+        super(QuotasNegativeTest, self).tearDown()
+        self.credentials_provider.cleanup_default_secgroup(
+            self.os_admin.security_groups_client, self.project['id'])
+
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf')
     def test_network_quota_exceeding(self):
+        """Test creating network when exceeding network quota will fail"""
         # Set the network quota to two
         self.admin_quotas_client.update_quotas(self.project['id'], network=2)
 
         # Create two networks
         n1 = self.admin_networks_client.create_network(
-            tenant_id=self.project['id'])
+            project_id=self.project['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.admin_networks_client.delete_network,
                         n1['network']['id'])
         n2 = self.admin_networks_client.create_network(
-            tenant_id=self.project['id'])
+            project_id=self.project['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.admin_networks_client.delete_network,
                         n2['network']['id'])
@@ -73,7 +80,7 @@
                 lib_exc.Conflict,
                 r"Quota exceeded for resources: \['network'\].*"):
             n3 = self.admin_networks_client.create_network(
-                tenant_id=self.project['id'])
+                project_id=self.project['id'])
             self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                             self.admin_networks_client.delete_network,
                             n3['network']['id'])
diff --git a/tempest/api/network/admin/test_ports.py b/tempest/api/network/admin/test_ports.py
index 289e577..5f9f29f 100644
--- a/tempest/api/network/admin/test_ports.py
+++ b/tempest/api/network/admin/test_ports.py
@@ -24,6 +24,7 @@
 
 
 class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
+    """Test extended attributes of ports"""
 
     @classmethod
     def setup_clients(cls):
@@ -41,11 +42,14 @@
     @decorators.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
     @utils.services('compute')
     def test_create_port_binding_ext_attr(self):
+        """Test creating port with extended attribute"""
         post_body = {"network_id": self.network['id'],
                      "binding:host_id": self.host_id,
                      "name": data_utils.rand_name(self.__class__.__name__)}
         body = self.admin_ports_client.create_port(**post_body)
         port = body['port']
+        self.addCleanup(self.admin_ports_client.wait_for_resource_deletion,
+                        port['id'])
         self.addCleanup(
             test_utils.call_and_ignore_notfound_exc,
             self.admin_ports_client.delete_port, port['id'])
@@ -56,10 +60,13 @@
     @decorators.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
     @utils.services('compute')
     def test_update_port_binding_ext_attr(self):
+        """Test updating port's extended attribute"""
         post_body = {"network_id": self.network['id'],
                      "name": data_utils.rand_name(self.__class__.__name__)}
         body = self.admin_ports_client.create_port(**post_body)
         port = body['port']
+        self.addCleanup(self.admin_ports_client.wait_for_resource_deletion,
+                        port['id'])
         self.addCleanup(
             test_utils.call_and_ignore_notfound_exc,
             self.admin_ports_client.delete_port, port['id'])
@@ -73,11 +80,14 @@
     @decorators.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
     @utils.services('compute')
     def test_list_ports_binding_ext_attr(self):
+        """Test updating and listing port's extended attribute"""
         # Create a new port
         post_body = {"network_id": self.network['id'],
                      "name": data_utils.rand_name(self.__class__.__name__)}
         body = self.admin_ports_client.create_port(**post_body)
         port = body['port']
+        self.addCleanup(self.admin_ports_client.wait_for_resource_deletion,
+                        port['id'])
         self.addCleanup(
             test_utils.call_and_ignore_notfound_exc,
             self.admin_ports_client.delete_port, port['id'])
@@ -101,10 +111,13 @@
 
     @decorators.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
     def test_show_port_binding_ext_attr(self):
+        """Test showing port's extended attribute"""
         body = self.admin_ports_client.create_port(
             name=data_utils.rand_name(self.__class__.__name__),
             network_id=self.network['id'])
         port = body['port']
+        self.addCleanup(self.admin_ports_client.wait_for_resource_deletion,
+                        port['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.admin_ports_client.delete_port, port['id'])
         body = self.admin_ports_client.show_port(port['id'])
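The cleanups added above lean on addCleanup's LIFO ordering:
wait_for_resource_deletion is registered before delete_port, so at teardown
the port is deleted first and the wait runs afterwards. A tiny self-contained
illustration of that ordering with plain unittest (the labels are arbitrary):

import unittest

class CleanupOrder(unittest.TestCase):
    def test_lifo(self):
        calls = []
        self.addCleanup(calls.append, 'wait_for_deletion')  # registered first, runs last
        self.addCleanup(calls.append, 'delete_port')        # registered last, runs first
        self.doCleanups()
        self.assertEqual(['delete_port', 'wait_for_deletion'], calls)

if __name__ == '__main__':
    unittest.main()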
diff --git a/tempest/api/network/admin/test_quotas.py b/tempest/api/network/admin/test_quotas.py
index ef5ebb6..d8db298 100644
--- a/tempest/api/network/admin/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -67,7 +67,7 @@
         non_default_quotas = self.admin_quotas_client.list_quotas()
         found = False
         for qs in non_default_quotas['quotas']:
-            if qs['tenant_id'] == project_id:
+            if qs['project_id'] == project_id:
                 found = True
         self.assertTrue(found)
 
@@ -81,7 +81,7 @@
         self.admin_quotas_client.reset_quotas(project_id)
         non_default_quotas = self.admin_quotas_client.list_quotas()
         for q in non_default_quotas['quotas']:
-            self.assertNotEqual(project_id, q['tenant_id'])
+            self.assertNotEqual(project_id, q['project_id'])
         quota_set = self.admin_quotas_client.show_quotas(project_id)['quota']
         default_quotas = self.admin_quotas_client.show_default_quotas(
             project_id)['quota']
@@ -89,6 +89,7 @@
 
     @decorators.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb')
     def test_quotas(self):
+        """Test update/list/show/reset of network quotas"""
         new_quotas = {'network': 0, 'port': 0}
         self._check_quotas(new_quotas)
 
@@ -96,6 +97,7 @@
         'quota_details', 'network'), 'Quota details extension not enabled.')
     @decorators.idempotent_id('7b05ec5f-bf44-43cb-b28f-ddd72a824288')
     def test_show_quota_details(self):
+        """Test showing network quota details"""
         # Show quota details for an existing project
         quota_details = self.admin_quotas_client.show_quota_details(
             self.admin_quotas_client.tenant_id)['quota']
diff --git a/tempest/api/network/admin/test_routers.py b/tempest/api/network/admin/test_routers.py
index a4a057c..90e0917 100644
--- a/tempest/api/network/admin/test_routers.py
+++ b/tempest/api/network/admin/test_routers.py
@@ -27,6 +27,8 @@
 
 
 class RoutersAdminTest(base.BaseAdminNetworkTest):
+    """Test routers operation supported by admin"""
+
     # NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest
     # as some router operations, such as enabling or disabling SNAT
     # require admin credentials by default
@@ -52,7 +54,7 @@
 
     @decorators.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')
     def test_create_router_setting_project_id(self):
-        # Test creating router from admin user setting project_id.
+        """Test creating router from admin user setting project_id."""
         project = data_utils.rand_name('test_tenant_')
         description = data_utils.rand_name('desc_')
         project = identity.identity_utils(self.os_admin).create_project(
@@ -63,18 +65,18 @@
 
         name = data_utils.rand_name('router-')
         create_body = self.admin_routers_client.create_router(
-            name=name, tenant_id=project_id)
+            name=name, project_id=project_id)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.admin_routers_client.delete_router,
                         create_body['router']['id'])
-        self.assertEqual(project_id, create_body['router']['tenant_id'])
+        self.assertEqual(project_id, create_body['router']['project_id'])
 
     @decorators.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')
     @utils.requires_ext(extension='ext-gw-mode', service='network')
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_create_router_with_default_snat_value(self):
-        # Create a router with default snat rule
+        """Create a router with default snat rule"""
         router = self._create_router(
             external_network_id=CONF.network.public_network_id)
         self._verify_router_gateway(
@@ -86,6 +88,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_create_router_with_snat_explicit(self):
+        """Test creating router with specified enable_snat value"""
         name = data_utils.rand_name('snat-router')
         # Create a router enabling snat attributes
         enable_snat_states = [False, True]
@@ -134,6 +137,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_update_router_set_gateway(self):
+        """Test updating router's gateway info"""
         router = self._create_router()
         self.routers_client.update_router(
             router['id'],
@@ -150,6 +154,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_update_router_set_gateway_with_snat_explicit(self):
+        """Test setting router's gateway with snat enabled"""
         router = self._create_router()
         self.admin_routers_client.update_router(
             router['id'],
@@ -167,6 +172,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_update_router_set_gateway_without_snat(self):
+        """Test setting router's gateway with snat not enabled"""
         router = self._create_router()
         self.admin_routers_client.update_router(
             router['id'],
@@ -183,6 +189,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_update_router_unset_gateway(self):
+        """Test unsetting router's gateway"""
         router = self._create_router(
             external_network_id=CONF.network.public_network_id)
         self.routers_client.update_router(router['id'],
@@ -199,6 +206,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_update_router_reset_gateway_without_snat(self):
+        """Test updating router's gateway to be with snat not enabled"""
         router = self._create_router(
             external_network_id=CONF.network.public_network_id)
         self.admin_routers_client.update_router(
@@ -212,6 +220,43 @@
              'enable_snat': False})
         self._verify_gateway_port(router['id'])
 
+    @decorators.idempotent_id('cbe42f84-04c2-11e7-8adb-fa163e4fa634')
+    @utils.requires_ext(extension='ext-gw-mode', service='network')
+    def test_create_router_set_gateway_with_fixed_ip(self):
+        """Test creating router setting gateway with fixed ip"""
+        # First create an external network, then create and delete a port
+        # on it so we get a known-free fixed ip to use as the gateway address
+        network_name = data_utils.rand_name(self.__class__.__name__)
+        network_1 = self.admin_networks_client.create_network(
+            name=network_name, **{'router:external': True})['network']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.admin_networks_client.delete_network,
+                        network_1['id'])
+        subnet = self.create_subnet(
+            network_1, client=self.admin_subnets_client, enable_dhcp=False)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.admin_subnets_client.delete_subnet, subnet['id'])
+        port = self.admin_ports_client.create_port(
+            name=data_utils.rand_name(self.__class__.__name__),
+            network_id=network_1['id'])['port']
+        self.admin_ports_client.delete_port(port_id=port['id'])
+        fixed_ip = {
+            'subnet_id': port['fixed_ips'][0]['subnet_id'],
+            'ip_address': port['fixed_ips'][0]['ip_address']
+        }
+        external_gateway_info = {
+            'network_id': network_1['id'],
+            'external_fixed_ips': [fixed_ip]
+        }
+        # Create a router and set gateway to fixed_ip
+        router = self.admin_routers_client.create_router(
+            external_gateway_info=external_gateway_info)['router']
+        self.admin_routers_client.delete_router(router['id'])
+        # Verify the router's gateway address equals fixed_ip
+        self.assertEqual(router['external_gateway_info'][
+                         'external_fixed_ips'][0]['ip_address'],
+                         fixed_ip['ip_address'])
+
 
 class RoutersIpV6AdminTest(RoutersAdminTest):
     _ip_version = 6
diff --git a/tempest/api/network/admin/test_routers_dvr.py b/tempest/api/network/admin/test_routers_dvr.py
index 270f802..291581c 100644
--- a/tempest/api/network/admin/test_routers_dvr.py
+++ b/tempest/api/network/admin/test_routers_dvr.py
@@ -106,14 +106,14 @@
         attribute will be set to True
         """
         name = data_utils.rand_name('router')
-        tenant_id = self.routers_client.tenant_id
+        project_id = self.routers_client.project_id
         # router needs to be in admin state down in order to be upgraded to DVR
         # l3ha routers are not upgradable to dvr, make it explicitly non ha
         router = self.admin_routers_client.create_router(name=name,
                                                          distributed=False,
                                                          admin_state_up=False,
                                                          ha=False,
-                                                         tenant_id=tenant_id)
+                                                         project_id=project_id)
         router_id = router['router']['id']
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.admin_routers_client.delete_router, router_id)
diff --git a/tempest/api/network/admin/test_routers_negative.py b/tempest/api/network/admin/test_routers_negative.py
index f605945..914c046 100644
--- a/tempest/api/network/admin/test_routers_negative.py
+++ b/tempest/api/network/admin/test_routers_negative.py
@@ -27,6 +27,7 @@
 
 
 class RoutersAdminNegativeTest(base.BaseAdminNetworkTest):
+    """Admin negative tests of routers"""
 
     @classmethod
     def skip_checks(cls):
@@ -41,6 +42,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_router_set_gateway_used_ip_returns_409(self):
+        """Test creating router with gateway set to used ip should fail"""
         # At first create a address from public_network_id
         port = self.admin_ports_client.create_port(
             name=data_utils.rand_name(self.__class__.__name__),
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index b6bf369..696d68d 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -76,6 +76,8 @@
         cls.subnetpools_client = cls.os_primary.subnetpools_client
         cls.subnets_client = cls.os_primary.subnets_client
         cls.ports_client = cls.os_primary.ports_client
+        cls.floating_ips_port_forwarding_client =\
+            cls.os_primary.floating_ips_port_forwarding_client
         cls.quotas_client = cls.os_primary.network_quotas_client
         cls.floating_ips_client = cls.os_primary.floating_ips_client
         cls.security_groups_client = cls.os_primary.security_groups_client
@@ -84,6 +86,8 @@
         cls.network_versions_client = cls.os_primary.network_versions_client
         cls.service_providers_client = cls.os_primary.service_providers_client
         cls.tags_client = cls.os_primary.tags_client
+        cls.log_resource_client = cls.os_primary.log_resource_client
+        cls.loggable_resource_client = cls.os_primary.loggable_resource_client
 
     @classmethod
     def resource_setup(cls):
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index 639defb..bf9eae6 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
-
 from tempest.api.network import base
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
@@ -57,7 +55,7 @@
 
     @decorators.idempotent_id('86c3529b-1231-40de-803c-00e40882f043')
     def test_create_list_port_with_address_pair(self):
-        # Create port with allowed address pair attribute
+        """Create and list port with allowed address pair attribute"""
         allowed_address_pairs = [{'ip_address': self.ip_address,
                                   'mac_address': self.mac_address}]
         body = self.ports_client.create_port(
@@ -65,6 +63,8 @@
             name=data_utils.rand_name(self.__class__.__name__),
             allowed_address_pairs=allowed_address_pairs)
         port_id = body['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_id)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_id)
 
@@ -82,6 +82,8 @@
             network_id=self.network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
         port_id = body['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_id)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_id)
         if mac_address is None:
@@ -95,26 +97,39 @@
         body = self.ports_client.update_port(
             port_id, allowed_address_pairs=allowed_address_pairs)
         allowed_address_pair = body['port']['allowed_address_pairs']
-        six.assertCountEqual(self, allowed_address_pair,
-                             allowed_address_pairs)
+        # NOTE(slaweq): Attribute "active" is added to the
+        # allowed_address_pairs in the Xena release.
+        # To make our existing allowed_address_pairs API tests to be passing in
+        # both cases, with and without that "active" attribute, we need to
+        # removes that field from the allowed_address_pairs which are returned
+        # by the Neutron server.
+        # We could make expected results of those tests to be dependend on the
+        # available Neutron's API extensions but in that case existing tests
+        # may fail randomly as all tests are always using same IP addresses
+        # thus allowed_address_pair may be active=True or active=False.
+        for pair in allowed_address_pair:
+            pair.pop('active', None)
+        self.assertCountEqual(allowed_address_pair, allowed_address_pairs)
 
     @decorators.idempotent_id('9599b337-272c-47fd-b3cf-509414414ac4')
     def test_update_port_with_address_pair(self):
-        # Update port with allowed address pair
+        """Update port with allowed address pair"""
         self._update_port_with_address(self.ip_address)
 
     @decorators.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
     def test_update_port_with_cidr_address_pair(self):
-        # Update allowed address pair with cidr
+        """Update allowed address pair with cidr"""
         self._update_port_with_address(str(self.cidr))
 
     @decorators.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
     def test_update_port_with_multiple_ip_mac_address_pair(self):
-        # Create an ip _address and mac_address through port create
+        """Update allowed address pair port with multiple ip and mac"""
         resp = self.ports_client.create_port(
             network_id=self.network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
         newportid = resp['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        newportid)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, newportid)
         ipaddress = resp['port']['fixed_ips'][0]['ip_address']
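The comparison above drops the server-populated 'active' key before the
order-insensitive assertCountEqual check (the same hunks also replace
six.assertCountEqual(self, a, b) with the native self.assertCountEqual(a, b)).
A standalone sketch of that normalization with made-up example values:

def normalize_pairs(pairs, drop=('active',)):
    # Return copies of the address pairs without server-populated fields.
    return [{k: v for k, v in pair.items() if k not in drop} for pair in pairs]

expected = [{'ip_address': '10.0.0.5', 'mac_address': 'fa:16:3e:00:00:01'}]
returned = [{'ip_address': '10.0.0.5', 'mac_address': 'fa:16:3e:00:00:01',
             'active': False}]
assert normalize_pairs(returned) == expected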
diff --git a/tempest/api/network/test_dhcp_ipv6.py b/tempest/api/network/test_dhcp_ipv6.py
index eb31ed3..fee6af5 100644
--- a/tempest/api/network/test_dhcp_ipv6.py
+++ b/tempest/api/network/test_dhcp_ipv6.py
@@ -104,9 +104,12 @@
 
     @decorators.idempotent_id('e5517e62-6f16-430d-a672-f80875493d4c')
     def test_dhcpv6_stateless_eui64(self):
-        # NOTE: When subnets configured with RAs SLAAC (AOM=100) and DHCP
-        # stateless (AOM=110) both for radvd and dnsmasq, port shall receive
-        # IP address calculated from its MAC.
+        """Test eui64 ip when setting slaac and statelss for subnet
+
+        NOTE: When subnets configured with RAs SLAAC (AOM=100) and DHCP
+        stateless (AOM=110) both for radvd and dnsmasq, port shall receive
+        IP address calculated from its MAC.
+        """
         for ra_mode, add_mode in (
                 ('slaac', 'slaac'),
                 ('dhcpv6-stateless', 'dhcpv6-stateless'),
@@ -122,9 +125,12 @@
 
     @decorators.idempotent_id('ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832')
     def test_dhcpv6_stateless_no_ra(self):
-        # NOTE: When subnets configured with dnsmasq SLAAC and DHCP stateless
-        # and there is no radvd, port shall receive IP address calculated
-        # from its MAC and mask of subnet.
+        """Test eui64 ip when setting stateless and no radvd for subnets
+
+        NOTE: When subnets configured with dnsmasq SLAAC and DHCP stateless
+        and there is no radvd, port shall receive IP address calculated
+        from its MAC and mask of subnet.
+        """
         for ra_mode, add_mode in (
                 (None, 'slaac'),
                 (None, 'dhcpv6-stateless'),
@@ -161,8 +167,11 @@
 
     @decorators.idempotent_id('21635b6f-165a-4d42-bf49-7d195e47342f')
     def test_dhcpv6_stateless_no_ra_no_dhcp(self):
-        # NOTE: If no radvd option and no dnsmasq option is configured
-        # port shall receive IP from fixed IPs list of subnet.
+        """Test eui64 ip when setting no radvd and no dnsmasq for subnets
+
+        NOTE: If no radvd option and no dnsmasq option is configured
+        port shall receive IP from fixed IPs list of subnet.
+        """
         real_ip, eui_ip = self._get_ips_from_subnet()
         self._clean_network()
         self.assertNotEqual(eui_ip, real_ip,
@@ -173,10 +182,13 @@
 
     @decorators.idempotent_id('4544adf7-bb5f-4bdc-b769-b3e77026cef2')
     def test_dhcpv6_two_subnets(self):
-        # NOTE: When one IPv6 subnet configured with dnsmasq SLAAC or DHCP
-        # stateless and other IPv6 is with DHCP stateful, port shall receive
-        # EUI-64 IP addresses from first subnet and DHCP address from second
-        # one. Order of subnet creating should be unimportant.
+        """Test eui64 ip when creating port under network with two subnets
+
+        NOTE: When one IPv6 subnet configured with dnsmasq SLAAC or DHCP
+        stateless and other IPv6 is with DHCP stateful, port shall receive
+        EUI-64 IP addresses from first subnet and DHCP address from second
+        one. Order of subnet creation should be unimportant.
+        """
         for order in ("slaac_first", "dhcp_first"):
             for ra_mode, add_mode in (
                     ('slaac', 'slaac'),
@@ -225,10 +237,13 @@
 
     @decorators.idempotent_id('4256c61d-c538-41ea-9147-3c450c36669e')
     def test_dhcpv6_64_subnets(self):
-        # NOTE: When one IPv6 subnet configured with dnsmasq SLAAC or DHCP
-        # stateless and other IPv4 is with DHCP of IPv4, port shall receive
-        # EUI-64 IP addresses from first subnet and IPv4 DHCP address from
-        # second one. Order of subnet creating should be unimportant.
+        """Test eui64 ip when setting slaac and stateless for subnets
+
+        NOTE: When one IPv6 subnet configured with dnsmasq SLAAC or DHCP
+        stateless and other IPv4 is with DHCP of IPv4, port shall receive
+        EUI-64 IP addresses from first subnet and IPv4 DHCP address from
+        second one. Order of subnet creation should be unimportant.
+        """
         for order in ("slaac_first", "dhcp_first"):
             for ra_mode, add_mode in (
                     ('slaac', 'slaac'),
@@ -271,8 +286,11 @@
 
     @decorators.idempotent_id('4ab211a0-276f-4552-9070-51e27f58fecf')
     def test_dhcp_stateful(self):
-        # NOTE: With all options below, DHCPv6 shall allocate address from
-        # subnet pool to port.
+        """Test creating port when setting stateful for subnets
+
+        NOTE: With all options below, DHCPv6 shall allocate address from
+        subnet pool to port.
+        """
         for ra_mode, add_mode in (
                 ('dhcpv6-stateful', 'dhcpv6-stateful'),
                 ('dhcpv6-stateful', None),
@@ -294,9 +312,12 @@
 
     @decorators.idempotent_id('51a5e97f-f02e-4e4e-9a17-a69811d300e3')
     def test_dhcp_stateful_fixedips(self):
-        # NOTE: With all options below, port shall be able to get
-        # requested IP from fixed IP range not depending on
-        # DHCP stateful (not SLAAC!) settings configured.
+        """Test creating port with fixed ip when setting stateful for subnets
+
+        NOTE: With all options below, port shall be able to get
+        requested IP from fixed IP range not depending on
+        DHCP stateful (not SLAAC!) settings configured.
+        """
         for ra_mode, add_mode in (
                 ('dhcpv6-stateful', 'dhcpv6-stateful'),
                 ('dhcpv6-stateful', None),
@@ -324,8 +345,11 @@
 
     @decorators.idempotent_id('98244d88-d990-4570-91d4-6b25d70d08af')
     def test_dhcp_stateful_fixedips_outrange(self):
-        # NOTE: When port gets IP address from fixed IP range it
-        # shall be checked if it's from subnets range.
+        """Test creating port with fixed ip that is not in the range
+
+        NOTE: When port gets IP address from fixed IP range it
+        shall be checked if it's from subnets range.
+        """
         kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
                   'ipv6_address_mode': 'dhcpv6-stateful'}
         subnet = self.create_subnet(self.network, **kwargs)
@@ -342,8 +366,11 @@
 
     @decorators.idempotent_id('57b8302b-cba9-4fbb-8835-9168df029051')
     def test_dhcp_stateful_fixedips_duplicate(self):
-        # NOTE: When port gets IP address from fixed IP range it
-        # shall be checked if it's not duplicate.
+        """Test creating port with duplicate fixed ip
+
+        NOTE: When port gets IP address from fixed IP range it
+        shall be checked if it's not duplicate.
+        """
         kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
                   'ipv6_address_mode': 'dhcpv6-stateful'}
         subnet = self.create_subnet(self.network, **kwargs)
@@ -376,8 +403,11 @@
 
     @decorators.idempotent_id('e98f65db-68f4-4330-9fea-abd8c5192d4d')
     def test_dhcp_stateful_router(self):
-        # NOTE: With all options below the router interface shall
-        # receive DHCPv6 IP address from allocation pool.
+        """Test creating router with dhcp stateful
+
+        NOTE: With all options below the router interface shall
+        receive DHCPv6 IP address from allocation pool.
+        """
         for ra_mode, add_mode in (
                 ('dhcpv6-stateful', 'dhcpv6-stateful'),
                 ('dhcpv6-stateful', None),
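The DHCPv6 docstrings above keep referring to the EUI-64 address, i.e. the
IPv6 address derived from the subnet prefix plus an interface identifier built
from the port's MAC (universal/local bit flipped, ff:fe inserted in the
middle). A short standard-library sketch of that derivation, with an arbitrary
example prefix and MAC:

import ipaddress

def eui64_address(prefix, mac):
    # Build the modified EUI-64 interface identifier from the MAC address.
    octets = [int(part, 16) for part in mac.split(':')]
    octets[0] ^= 0x02                          # flip the universal/local bit
    iid = octets[:3] + [0xff, 0xfe] + octets[3:]
    iid_int = int.from_bytes(bytes(iid), 'big')
    network = ipaddress.IPv6Network(prefix)
    return str(ipaddress.IPv6Address(int(network.network_address) + iid_int))

print(eui64_address('2001:db8::/64', 'fa:16:3e:00:00:01'))
# -> 2001:db8::f816:3eff:fe00:1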
diff --git a/tempest/api/network/test_extensions.py b/tempest/api/network/test_extensions.py
index 4804ada..e116d7c 100644
--- a/tempest/api/network/test_extensions.py
+++ b/tempest/api/network/test_extensions.py
@@ -32,7 +32,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('ef28c7e6-e646-4979-9d67-deb207bc5564')
     def test_list_show_extensions(self):
-        # List available extensions for the project
+        """List available extensions and show the detail of each extension"""
         expected_alias = ['security-group', 'l3_agent_scheduler',
                           'ext-gw-mode', 'binding', 'quotas',
                           'agent', 'dhcp_agent_scheduler', 'provider',
diff --git a/tempest/api/network/test_extra_dhcp_options.py b/tempest/api/network/test_extra_dhcp_options.py
index d363081..bc6418a 100644
--- a/tempest/api/network/test_extra_dhcp_options.py
+++ b/tempest/api/network/test_extra_dhcp_options.py
@@ -58,7 +58,7 @@
 
     @decorators.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
     def test_create_list_port_with_extra_dhcp_options(self):
-        # Create a port with Extra DHCP Options
+        """Test creating a port with Extra DHCP Options and list those"""
         body = self.ports_client.create_port(
             network_id=self.network['id'],
             name=data_utils.rand_name(self.__class__.__name__),
@@ -76,7 +76,7 @@
 
     @decorators.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
     def test_update_show_port_with_extra_dhcp_options(self):
-        # Update port with extra dhcp options
+        """Test updating port with extra DHCP options and show that port"""
         name = data_utils.rand_name('new-port-name')
         self.ports_client.update_port(
             self.port['id'],
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index aaa5497..64f6e80 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -15,9 +15,9 @@
 
 from tempest.api.network import base
 from tempest.common import utils
-from tempest.common.utils import data_utils
 from tempest.common.utils import net_utils
 from tempest import config
+from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
@@ -66,13 +66,14 @@
         cls.create_router_interface(cls.router['id'], cls.subnet['id'])
         # Create two ports one each for Creation and Updating of floatingIP
         cls.ports = []
-        for i in range(2):
+        for _ in range(2):
             port = cls.create_port(cls.network)
             cls.ports.append(port)
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e8718')
     def test_create_list_show_update_delete_floating_ip(self):
+        """Test create/list/show/update/delete floating ip"""
         # Creates a floating IP
         body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
@@ -83,7 +84,7 @@
             self.floating_ips_client.delete_floatingip,
             created_floating_ip['id'])
         self.assertIsNotNone(created_floating_ip['id'])
-        self.assertIsNotNone(created_floating_ip['tenant_id'])
+        self.assertIsNotNone(created_floating_ip['project_id'])
         self.assertIsNotNone(created_floating_ip['floating_ip_address'])
         self.assertEqual(created_floating_ip['port_id'], self.ports[0]['id'])
         self.assertEqual(created_floating_ip['floating_network_id'],
@@ -97,8 +98,8 @@
         self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
         self.assertEqual(shown_floating_ip['floating_network_id'],
                          self.ext_net_id)
-        self.assertEqual(shown_floating_ip['tenant_id'],
-                         created_floating_ip['tenant_id'])
+        self.assertEqual(shown_floating_ip['project_id'],
+                         created_floating_ip['project_id'])
         self.assertEqual(shown_floating_ip['floating_ip_address'],
                          created_floating_ip['floating_ip_address'])
         self.assertEqual(shown_floating_ip['port_id'], self.ports[0]['id'])
@@ -133,6 +134,14 @@
 
     @decorators.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
     def test_floating_ip_delete_port(self):
+        """Test deleting floating ip's port
+
+        1. Create a floating ip
+        2. Create a port
+        3. Update the floating ip's port_id to the created port
+        4. Delete the port
+        5. Verify that the port details are cleared from the floating ip
+        """
         # Create a floating IP
         body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id)
@@ -163,6 +172,7 @@
 
     @decorators.idempotent_id('1bb2f731-fe5a-4b8c-8409-799ade1bed4d')
     def test_floating_ip_update_different_router(self):
+        """Test associating a floating ip to a port on different router"""
         # Associate a floating IP to a port on a router
         body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
@@ -211,6 +221,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5')
     def test_create_floating_ip_specifying_a_fixed_ip_address(self):
+        """Test creating floating ip with specified fixed ip"""
         body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id,
             port_id=self.ports[1]['id'],
@@ -230,6 +241,12 @@
 
     @decorators.idempotent_id('45c4c683-ea97-41ef-9c51-5e9802f2f3d7')
     def test_create_update_floatingip_with_port_multiple_ip_address(self):
+        """Test updating floating ip's fixed_ips to another ip of same port
+
+        First we create a port with 2 fixed ips, then we create a floating ip
+        with one of the fixed ips, and then we update the floating ip to
+        another fixed ip of that port.
+        """
         # Find out ips that can be used for tests
         list_ips = net_utils.get_unused_ip_addresses(
             self.ports_client,
diff --git a/tempest/api/network/test_floating_ips_negative.py b/tempest/api/network/test_floating_ips_negative.py
index 1688c9d..80df5d6 100644
--- a/tempest/api/network/test_floating_ips_negative.py
+++ b/tempest/api/network/test_floating_ips_negative.py
@@ -58,6 +58,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('22996ea8-4a81-4b27-b6e1-fa5df92fa5e8')
     def test_create_floatingip_with_port_ext_net_unreachable(self):
+        """Creating floating ip when port's external network is unreachable"""
         self.assertRaises(
             lib_exc.NotFound, self.floating_ips_client.create_floatingip,
             floating_network_id=self.ext_net_id, port_id=self.port['id'],
@@ -67,6 +68,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('50b9aeb4-9f0b-48ee-aa31-fa955a48ff54')
     def test_create_floatingip_in_private_network(self):
+        """Test creating floating in private network"""
         self.assertRaises(lib_exc.BadRequest,
                           self.floating_ips_client.create_floatingip,
                           floating_network_id=self.network['id'],
@@ -77,6 +79,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6b3b8797-6d43-4191-985c-c48b773eb429')
     def test_associate_floatingip_port_ext_net_unreachable(self):
+        """Associate floating ip to port with unreachable external network"""
         # Create floating ip
         body = self.floating_ips_client.create_floatingip(
             floating_network_id=self.ext_net_id)
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index eba1f6c..caaf964 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 import netaddr
-import six
 import testtools
 
 from tempest.api.network import base
@@ -29,6 +28,7 @@
 
 
 class BaseNetworkTestResources(base.BaseNetworkTest):
+    """Test networks"""
 
     @classmethod
     def resource_setup(cls):
@@ -158,6 +158,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('0e269138-0da6-4efc-a46d-578161e7b221')
     def test_create_update_delete_network_subnet(self):
+        """Verify creating, updating and deleting network subnet"""
         # Create a network
         network = self.create_network()
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
@@ -183,7 +184,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('2bf13842-c93f-4a69-83ed-717d2ec3b44e')
     def test_show_network(self):
-        # Verify the details of a network
+        """Verify the details of a network"""
         body = self.networks_client.show_network(self.network['id'])
         network = body['network']
         for key in ['id', 'name']:
@@ -191,7 +192,7 @@
 
     @decorators.idempotent_id('867819bb-c4b6-45f7-acf9-90edcf70aa5e')
     def test_show_network_fields(self):
-        # Verify specific fields of a network
+        """Verify specific fields of a network"""
         fields = ['id', 'name']
         if utils.is_extension_enabled('net-mtu', 'network'):
             fields.append('mtu')
@@ -207,7 +208,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('f7ffdeda-e200-4a7a-bcbe-05716e86bf43')
     def test_list_networks(self):
-        # Verify the network exists in the list of all networks
+        """Verify the network exists in the list of all networks"""
         body = self.networks_client.list_networks()
         networks = [network['id'] for network in body['networks']
                     if network['id'] == self.network['id']]
@@ -215,7 +216,7 @@
 
     @decorators.idempotent_id('6ae6d24f-9194-4869-9c85-c313cb20e080')
     def test_list_networks_fields(self):
-        # Verify specific fields of the networks
+        """Verify specific fields of the networks"""
         fields = ['id', 'name']
         if utils.is_extension_enabled('net-mtu', 'network'):
             fields.append('mtu')
@@ -228,7 +229,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc')
     def test_show_subnet(self):
-        # Verify the details of a subnet
+        """Verify the details of a subnet"""
         body = self.subnets_client.show_subnet(self.subnet['id'])
         subnet = body['subnet']
         self.assertNotEmpty(subnet, "Subnet returned has no fields")
@@ -238,7 +239,7 @@
 
     @decorators.idempotent_id('270fff0b-8bfc-411f-a184-1e8fd35286f0')
     def test_show_subnet_fields(self):
-        # Verify specific fields of a subnet
+        """Verify specific fields of a subnet"""
         fields = ['id', 'network_id']
         body = self.subnets_client.show_subnet(self.subnet['id'],
                                                fields=fields)
@@ -250,7 +251,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('db68ba48-f4ea-49e9-81d1-e367f6d0b20a')
     def test_list_subnets(self):
-        # Verify the subnet exists in the list of all subnets
+        """Verify the subnet exists in the list of all subnets"""
         body = self.subnets_client.list_subnets()
         subnets = [subnet['id'] for subnet in body['subnets']
                    if subnet['id'] == self.subnet['id']]
@@ -258,7 +259,7 @@
 
     @decorators.idempotent_id('842589e3-9663-46b0-85e4-7f01273b0412')
     def test_list_subnets_fields(self):
-        # Verify specific fields of subnets
+        """Verify specific fields of subnets"""
         fields = ['id', 'network_id']
         body = self.subnets_client.list_subnets(fields=fields)
         subnets = body['subnets']
@@ -268,6 +269,7 @@
 
     @decorators.idempotent_id('f04f61a9-b7f3-4194-90b2-9bcf660d1bfe')
     def test_delete_network_with_subnet(self):
+        """Verify deleting network with subnet"""
         # Creates a network
         network = self.create_network()
         net_id = network['id']
@@ -287,34 +289,41 @@
 
     @decorators.idempotent_id('d2d596e2-8e76-47a9-ac51-d4648009f4d3')
     def test_create_delete_subnet_without_gateway(self):
+        """Verify creating and deleting subnet without gateway"""
         self._create_verify_delete_subnet()
 
     @decorators.idempotent_id('9393b468-186d-496d-aa36-732348cd76e7')
     def test_create_delete_subnet_with_gw(self):
+        """Verify creating and deleting subnet with gateway"""
         self._create_verify_delete_subnet(
             **self.subnet_dict(['gateway']))
 
     @decorators.idempotent_id('bec949c4-3147-4ba6-af5f-cd2306118404')
     def test_create_delete_subnet_with_allocation_pools(self):
+        """Verify creating and deleting subnet with allocation pools"""
         self._create_verify_delete_subnet(
             **self.subnet_dict(['allocation_pools']))
 
     @decorators.idempotent_id('8217a149-0c6c-4cfb-93db-0486f707d13f')
     def test_create_delete_subnet_with_gw_and_allocation_pools(self):
+        """Verify create/delete subnet with gateway and allocation pools"""
         self._create_verify_delete_subnet(**self.subnet_dict(
             ['gateway', 'allocation_pools']))
 
     @decorators.idempotent_id('d830de0a-be47-468f-8f02-1fd996118289')
     def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
+        """Verify create/delete subnet with host routes and name servers"""
         self._create_verify_delete_subnet(
             **self.subnet_dict(['host_routes', 'dns_nameservers']))
 
     @decorators.idempotent_id('94ce038d-ff0a-4a4c-a56b-09da3ca0b55d')
     def test_create_delete_subnet_with_dhcp_enabled(self):
+        """Verify create/delete subnet with dhcp enabled"""
         self._create_verify_delete_subnet(enable_dhcp=True)
 
     @decorators.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
     def test_update_subnet_gw_dns_host_routes_dhcp(self):
+        """Verify updating subnet's gateway/nameserver/routes/dhcp"""
         network = self.create_network()
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.networks_client.delete_network, network['id'])
@@ -349,6 +358,7 @@
 
     @decorators.idempotent_id('a4d9ec4c-0306-4111-a75c-db01a709030b')
     def test_create_delete_subnet_all_attributes(self):
+        """Verify create/delete subnet's all attributes"""
         self._create_verify_delete_subnet(
             enable_dhcp=True,
             **self.subnet_dict(['gateway', 'host_routes', 'dns_nameservers']))
@@ -359,6 +369,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_external_network_visibility(self):
+        """Verify external network's visibility"""
         public_network_id = CONF.network.public_network_id
 
         # find external network matching public_network_id
@@ -394,6 +405,7 @@
     @utils.requires_ext(extension="standard-attr-description",
                         service="network")
     def test_create_update_network_description(self):
+        """Verify creating and updating network's description"""
         body = self.create_network(description='d1')
         self.assertEqual('d1', body['description'])
         net_id = body['id']
@@ -454,6 +466,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')
     def test_bulk_create_delete_network(self):
+        """Verify creating and deleting multiple networks in one request"""
         # Creates 2 networks in one request
         network_list = [{'name': data_utils.rand_name('network-')},
                         {'name': data_utils.rand_name('network-')}]
@@ -470,6 +483,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('8936533b-c0aa-4f29-8e53-6cc873aec489')
     def test_bulk_create_delete_subnet(self):
+        """Verify creating and deleting multiple subnets in one request"""
         networks = [self.create_network(), self.create_network()]
         # Creates 2 subnets in one request
         cidrs = [subnet_cidr
@@ -499,6 +513,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('48037ff2-e889-4c3b-b86a-8e3f34d2d060')
     def test_bulk_create_delete_port(self):
+        """Verify creating and deleting multiple ports in one request"""
         networks = [self.create_network(), self.create_network()]
         # Creates 2 ports in one request
         names = [data_utils.rand_name('port-') for i in range(len(networks))]
@@ -532,6 +547,7 @@
 
     @decorators.idempotent_id('e41a4888-65a6-418c-a095-f7c2ef4ad59a')
     def test_create_delete_subnet_with_gw(self):
+        """Verify creating and deleting subnet with gateway"""
         net = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
         gateway = str(netaddr.IPAddress(net.first + 2))
         network = self.create_network()
@@ -541,6 +557,7 @@
 
     @decorators.idempotent_id('ebb4fd95-524f-46af-83c1-0305b239338f')
     def test_create_delete_subnet_with_default_gw(self):
+        """Verify creating and deleting subnet without specified gateway"""
         net = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
         gateway_ip = str(netaddr.IPAddress(net.first + 1))
         network = self.create_network()
@@ -550,6 +567,12 @@
 
     @decorators.idempotent_id('a9653883-b2a4-469b-8c3c-4518430a7e55')
     def test_create_list_subnet_with_no_gw64_one_network(self):
+        """Verify subnets with and without gateway are in one network
+
+        First we create a network, then we create one ipv6 subnet with
+        gateway and one ipv4 subnet without gateway, the two subnets
+        should be in the same network
+        """
         network = self.create_network()
         ipv6_gateway = self.subnet_dict(['gateway'])['gateway']
         subnet1 = self.create_subnet(network,
@@ -571,9 +594,9 @@
         subnets = [sub['id'] for sub in body['subnets']
                    if sub['network_id'] == network['id']]
         test_subnet_ids = [sub['id'] for sub in (subnet1, subnet2)]
-        six.assertCountEqual(self, subnets,
-                             test_subnet_ids,
-                             'Subnet are not in the same network')
+        self.assertCountEqual(subnets,
+                              test_subnet_ids,
+                              'Subnets are not in the same network')
 
 
 class NetworksIpV6TestAttrs(BaseNetworkTestResources):
@@ -589,6 +612,7 @@
 
     @decorators.idempotent_id('da40cd1b-a833-4354-9a85-cd9b8a3b74ca')
     def test_create_delete_subnet_with_v6_attributes_stateful(self):
+        """Test create/delete subnet with ipv6 attributes stateful"""
         self._create_verify_delete_subnet(
             gateway=self._subnet_data[self._ip_version]['gateway'],
             ipv6_ra_mode='dhcpv6-stateful',
@@ -596,12 +620,14 @@
 
     @decorators.idempotent_id('176b030f-a923-4040-a755-9dc94329e60c')
     def test_create_delete_subnet_with_v6_attributes_slaac(self):
+        """Test create/delete subnet with ipv6 attributes slaac"""
         self._create_verify_delete_subnet(
             ipv6_ra_mode='slaac',
             ipv6_address_mode='slaac')
 
     @decorators.idempotent_id('7d410310-8c86-4902-adf9-865d08e31adb')
     def test_create_delete_subnet_with_v6_attributes_stateless(self):
+        """Test create/delete subnet with ipv6 attributes stateless"""
         self._create_verify_delete_subnet(
             ipv6_ra_mode='dhcpv6-stateless',
             ipv6_address_mode='dhcpv6-stateless')
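
The hunk above that drops six.assertCountEqual(self, ...) in favour of the test case's own assertCountEqual relies on the Python 3 unittest method of the same name. A minimal standalone sketch of the same order-insensitive assertion (the class name and data below are hypothetical, not part of Tempest):

import unittest


class SubnetListExample(unittest.TestCase):
    # Hypothetical test; it only demonstrates assertCountEqual, the
    # py3-native replacement for six.assertCountEqual used above.
    def test_same_elements_any_order(self):
        listed = ['subnet-b', 'subnet-a']
        created = ['subnet-a', 'subnet-b']
        # Passes when both sequences contain the same elements,
        # regardless of ordering.
        self.assertCountEqual(listed, created,
                              'Subnets are not in the same network')


if __name__ == '__main__':
    unittest.main()
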
diff --git a/tempest/api/network/test_networks_negative.py b/tempest/api/network/test_networks_negative.py
index 3af67dd..0525484 100644
--- a/tempest/api/network/test_networks_negative.py
+++ b/tempest/api/network/test_networks_negative.py
@@ -21,10 +21,12 @@
 
 
 class NetworksNegativeTestJSON(base.BaseNetworkTest):
+    """Negative tests of network"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9293e937-824d-42d2-8d5b-e985ea67002a')
     def test_show_non_existent_network(self):
+        """Test showing non existent network"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.networks_client.show_network,
                           non_exist_id)
@@ -32,6 +34,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d746b40c-5e09-4043-99f7-cba1be8b70df')
     def test_show_non_existent_subnet(self):
+        """Test showing non existent subnet"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.subnets_client.show_subnet,
                           non_exist_id)
@@ -39,6 +42,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a954861d-cbfd-44e8-b0a9-7fab111f235d')
     def test_show_non_existent_port(self):
+        """Test showing non existent port"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.ports_client.show_port,
                           non_exist_id)
@@ -46,6 +50,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('98bfe4e3-574e-4012-8b17-b2647063de87')
     def test_update_non_existent_network(self):
+        """Test updating non existent network"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(
             lib_exc.NotFound, self.networks_client.update_network,
@@ -54,6 +59,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('03795047-4a94-4120-a0a1-bd376e36fd4e')
     def test_delete_non_existent_network(self):
+        """Test deleting non existent network"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.networks_client.delete_network,
@@ -62,6 +68,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1cc47884-ac52-4415-a31c-e7ce5474a868')
     def test_update_non_existent_subnet(self):
+        """Test updating non existent subnet"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.subnets_client.update_subnet,
                           non_exist_id, name='new_name')
@@ -69,6 +76,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a176c859-99fb-42ec-a208-8a85b552a239')
     def test_delete_non_existent_subnet(self):
+        """Test deleting non existent subnet"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.subnets_client.delete_subnet, non_exist_id)
@@ -76,6 +84,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('13d3b106-47e6-4b9b-8d53-dae947f092fe')
     def test_create_port_on_non_existent_network(self):
+        """Test creating port on non existent network"""
         non_exist_net_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.ports_client.create_port,
@@ -85,6 +94,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('cf8eef21-4351-4f53-adcd-cc5cb1e76b92')
     def test_update_non_existent_port(self):
+        """Test updating non existent port"""
         non_exist_port_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.ports_client.update_port,
                           non_exist_port_id, name='new_name')
@@ -92,6 +102,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('49ec2bbd-ac2e-46fd-8054-798e679ff894')
     def test_delete_non_existent_port(self):
+        """Test deleting non existent port"""
         non_exist_port_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.ports_client.delete_port, non_exist_port_id)
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 10121de..190f7e0 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -16,7 +16,6 @@
 import ipaddress
 
 import netaddr
-import six
 import testtools
 
 from tempest.api.network import base_security_groups as sec_base
@@ -52,7 +51,8 @@
 
     def _create_subnet(self, network, gateway='',
                        cidr=None, mask_bits=None, **kwargs):
-        subnet = self.create_subnet(network, gateway, cidr, mask_bits)
+        subnet = self.create_subnet(
+            network, gateway, cidr, mask_bits, **kwargs)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.subnets_client.delete_subnet, subnet['id'])
         return subnet
@@ -70,12 +70,15 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
     def test_create_update_delete_port(self):
+        """Test creating, updating and deleting port"""
         # Verify port creation
         body = self.ports_client.create_port(
             network_id=self.network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
         port = body['port']
         # Schedule port deletion with verification upon test completion
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port['id'])
         self.addCleanup(self._delete_port, port['id'])
         self.assertTrue(port['admin_state_up'])
         # Verify port update
@@ -89,6 +92,7 @@
 
     @decorators.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
     def test_create_bulk_port(self):
+        """Test creating multiple ports in a single request"""
         network1 = self.network
         network2 = self._create_network()
         network_list = [network1['id'], network2['id']]
@@ -97,6 +101,10 @@
         created_ports = body['ports']
         port1 = created_ports[0]
         port2 = created_ports[1]
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port1['id'])
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port2['id'])
         self.addCleanup(self._delete_port, port1['id'])
         self.addCleanup(self._delete_port, port2['id'])
         self.assertEqual(port1['network_id'], network1['id'])
@@ -107,6 +115,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
     def test_create_port_in_allowed_allocation_pools(self):
+        """Test creating port in allowed allocation pools"""
         network = self._create_network()
         net_id = network['id']
         address = self.cidr
@@ -123,6 +132,8 @@
         body = self.ports_client.create_port(
             network_id=net_id,
             name=data_utils.rand_name(self.__class__.__name__))
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        body['port']['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, body['port']['id'])
         port = body['port']
@@ -136,7 +147,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
     def test_show_port(self):
-        # Verify the details of port
+        """Verify the details of port"""
         body = self.ports_client.show_port(self.port['id'])
         port = body['port']
         self.assertIn('id', port)
@@ -152,7 +163,7 @@
 
     @decorators.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
     def test_show_port_fields(self):
-        # Verify specific fields of a port
+        """Verify specific fields of a port"""
         fields = ['id', 'mac_address']
         body = self.ports_client.show_port(self.port['id'],
                                            fields=fields)
@@ -164,7 +175,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
     def test_list_ports(self):
-        # Verify the port exists in the list of all ports
+        """Verify the port exists in the list of all ports"""
         body = self.ports_client.list_ports()
         ports = [port['id'] for port in body['ports']
                  if port['id'] == self.port['id']]
@@ -172,6 +183,7 @@
 
     @decorators.idempotent_id('e7fe260b-1e79-4dd3-86d9-bec6a7959fc5')
     def test_port_list_filter_by_ip(self):
+        """Test listing ports filtered by ip"""
         # Create network and subnet
         network = self._create_network()
         self._create_subnet(network)
@@ -179,11 +191,15 @@
         port_1 = self.ports_client.create_port(
             network_id=network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_1['port']['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_1['port']['id'])
         port_2 = self.ports_client.create_port(
             network_id=network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_2['port']['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_2['port']['id'])
         # List ports filtered by fixed_ips
@@ -192,9 +208,9 @@
         port_list = self.ports_client.list_ports(fixed_ips=fixed_ips)
         # Check that we got the desired port
         ports = port_list['ports']
-        tenant_ids = set([port['tenant_id'] for port in ports])
-        self.assertEqual(len(tenant_ids), 1,
-                         'Ports from multiple tenants are in the list resp')
+        project_ids = set([port['project_id'] for port in ports])
+        self.assertEqual(len(project_ids), 1,
+                         'Ports from multiple projects are in the list resp')
         port_ids = [port['id'] for port in ports]
         fixed_ips = [port['fixed_ips'] for port in ports]
         port_net_ids = [port['network_id'] for port in ports]
@@ -211,21 +227,22 @@
         utils.is_extension_enabled('ip-substring-filtering', 'network'),
         'ip-substring-filtering extension not enabled.')
     def test_port_list_filter_by_ip_substr(self):
+        """Test listing ports filtered by part of ip address string"""
         # Create network and subnet
         network = self._create_network()
         subnet = self._create_subnet(network)
         # Get two IP addresses
         ip_address_1 = None
         ip_address_2 = None
-        ip_network = ipaddress.ip_network(six.text_type(subnet['cidr']))
+        ip_network = ipaddress.ip_network(str(subnet['cidr']))
         for ip in ip_network:
             if ip == ip_network.network_address:
                 continue
             if ip_address_1 is None:
-                ip_address_1 = six.text_type(ip)
+                ip_address_1 = str(ip)
             else:
                 ip_address_2 = ip_address_1
-                ip_address_1 = six.text_type(ip)
+                ip_address_1 = str(ip)
                 # Make sure these two IP addresses have different substring
                 if ip_address_1[:-1] != ip_address_2[:-1]:
                     break
@@ -236,6 +253,8 @@
             network_id=network['id'],
             name=data_utils.rand_name(self.__class__.__name__),
             fixed_ips=fixed_ips)
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_1['port']['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_1['port']['id'])
         fixed_ips = [{'subnet_id': subnet['id'], 'ip_address': ip_address_2}]
@@ -243,6 +262,8 @@
             network_id=network['id'],
             name=data_utils.rand_name(self.__class__.__name__),
             fixed_ips=fixed_ips)
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_2['port']['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_2['port']['id'])
 
@@ -289,6 +310,7 @@
 
     @decorators.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
     def test_port_list_filter_by_router_id(self):
+        """Test listing ports filtered by router id"""
         # Create a router
         network = self._create_network()
         self._create_subnet(network)
@@ -301,6 +323,8 @@
         # Add router interface to port created above
         self.routers_client.add_router_interface(router['id'],
                                                  port_id=port['port']['id'])
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port['port']['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.routers_client.remove_router_interface,
                         router['id'], port_id=port['port']['id'])
@@ -313,7 +337,7 @@
 
     @decorators.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
     def test_list_ports_fields(self):
-        # Verify specific fields of ports
+        """Verify specific fields of ports"""
         fields = ['id', 'mac_address']
         body = self.ports_client.list_ports(fields=fields)
         ports = body['ports']
@@ -324,6 +348,7 @@
 
     @decorators.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
     def test_create_update_port_with_second_ip(self):
+        """Test updating port from 2 fixed ips to 1 fixed ip and vice versa"""
         # Create a network with two subnets
         network = self._create_network()
         subnet_1 = self._create_subnet(network)
@@ -336,6 +361,8 @@
         # Create a port with multiple IP addresses
         port = self.create_port(network,
                                 fixed_ips=fixed_ips)
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port['id'])
         self.assertEqual(2, len(port['fixed_ips']))
@@ -379,6 +406,8 @@
             "admin_state_up": True,
             "fixed_ips": fixed_ip_1}
         body = self.ports_client.create_port(**post_body)
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        body['port']['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, body['port']['id'])
         port = body['port']
@@ -410,6 +439,12 @@
         utils.is_extension_enabled('security-group', 'network'),
         'security-group extension not enabled.')
     def test_update_port_with_security_group_and_extra_attributes(self):
+        """Test updating port's security_group along with extra attributes
+
+        First we create a port with one security group, and then we update the
+        port's security_group, in the same update request we also change
+        the port's fixed ips.
+        """
         self._update_port_with_security_groups(
             [data_utils.rand_name('secgroup')])
 
@@ -418,12 +453,19 @@
         utils.is_extension_enabled('security-group', 'network'),
         'security-group extension not enabled.')
     def test_update_port_with_two_security_groups_and_extra_attributes(self):
+        """Test updating port with two security_groups and extra attributes
+
+        First we create a port with one security group, and then we update the
+        port to two security_groups; in the same update request we also change
+        the port's fixed ips.
+        """
         self._update_port_with_security_groups(
             [data_utils.rand_name('secgroup'),
              data_utils.rand_name('secgroup')])
 
     @decorators.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
     def test_create_show_delete_port_user_defined_mac(self):
+        """Test creating port with user defined mac address"""
         # Create a port for a legal mac
         body = self.ports_client.create_port(
             network_id=self.network['id'],
@@ -436,6 +478,8 @@
             network_id=self.network['id'],
             mac_address=free_mac_address,
             name=data_utils.rand_name(self.__class__.__name__))
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        body['port']['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, body['port']['id'])
         port = body['port']
@@ -450,9 +494,12 @@
         utils.is_extension_enabled('security-group', 'network'),
         'security-group extension not enabled.')
     def test_create_port_with_no_securitygroups(self):
+        """Test creating port without security groups"""
         network = self._create_network()
         self._create_subnet(network)
         port = self.create_port(network, security_groups=[])
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port['id'])
         self.assertIsNotNone(port['security_groups'])
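
The test_ports.py hunks above register wait_for_resource_deletion before the corresponding delete cleanup. Because addCleanup callbacks run in reverse (LIFO) order, the wait therefore executes after the delete. A minimal sketch of that ordering, outside Tempest and with hypothetical event names:

import unittest


class CleanupOrderExample(unittest.TestCase):
    # Hypothetical test; it only demonstrates that addCleanup callbacks
    # run in LIFO order, which is why the wait is registered first above.
    def test_lifo_cleanup_order(self):
        events = []
        # Registered first, therefore executed last (after the delete).
        self.addCleanup(events.append, 'wait_for_resource_deletion')
        # Registered second, therefore executed first.
        self.addCleanup(events.append, 'delete_port')
        # Run the cleanups now so the order can be inspected in the test.
        self.doCleanups()
        self.assertEqual(['delete_port', 'wait_for_resource_deletion'],
                         events)


if __name__ == '__main__':
    unittest.main()
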
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index ad316d1..c03a8a2 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -27,6 +27,7 @@
 
 
 class RoutersTest(base.BaseNetworkTest):
+    """Test routers"""
 
     def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
         interface = self.routers_client.add_router_interface(
@@ -53,6 +54,7 @@
     @testtools.skipUnless(CONF.network.public_network_id,
                           'The public_network_id option must be specified.')
     def test_create_show_list_update_delete_router(self):
+        """Test create/show/list/update/delete of a router"""
         # Create a router
         router_name = data_utils.rand_name(self.__class__.__name__ + '-router')
         router = self.create_router(
@@ -87,6 +89,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('b42e6e39-2e37-49cc-a6f4-8467e940900a')
     def test_add_remove_router_interface_with_subnet_id(self):
+        """Test adding and removing router interface with subnet id"""
         network_name = data_utils.rand_name(self.__class__.__name__)
         network = self.networks_client.create_network(
             name=network_name)['network']
@@ -113,6 +116,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('2b7d2f37-6748-4d78-92e5-1d590234f0d5')
     def test_add_remove_router_interface_with_port_id(self):
+        """Test adding and removing router interface with port id"""
         network_name = data_utils.rand_name(self.__class__.__name__)
         network = self.networks_client.create_network(
             name=network_name)['network']
@@ -142,42 +146,10 @@
         self.routers_client.remove_router_interface(
             router['id'], port_id=port_body['port']['id'])
 
-    @decorators.idempotent_id('cbe42f84-04c2-11e7-8adb-fa163e4fa634')
-    @utils.requires_ext(extension='ext-gw-mode', service='network')
-    @testtools.skipUnless(CONF.network.public_network_id,
-                          'The public_network_id option must be specified.')
-    @decorators.skip_because(bug='1676207')
-    def test_create_router_set_gateway_with_fixed_ip(self):
-        # Don't know public_network_address, so at first create address
-        # from public_network and delete
-        port = self.admin_ports_client.create_port(
-            name=data_utils.rand_name(self.__class__.__name__),
-            network_id=CONF.network.public_network_id)['port']
-        self.admin_ports_client.delete_port(port_id=port['id'])
-
-        fixed_ip = {
-            'subnet_id': port['fixed_ips'][0]['subnet_id'],
-            'ip_address': port['fixed_ips'][0]['ip_address']
-        }
-        external_gateway_info = {
-            'network_id': CONF.network.public_network_id,
-            'external_fixed_ips': [fixed_ip]
-        }
-
-        # Create a router and set gateway to fixed_ip
-        router = self.admin_routers_client.create_router(
-            external_gateway_info=external_gateway_info)['router']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.admin_routers_client.delete_router,
-                        router_id=router['id'])
-        # Examine router's gateway is equal to fixed_ip
-        self.assertEqual(router['external_gateway_info'][
-                         'external_fixed_ips'][0]['ip_address'],
-                         fixed_ip['ip_address'])
-
     @decorators.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c')
     @utils.requires_ext(extension='extraroute', service='network')
     def test_update_delete_extra_route(self):
+        """Test updating and deleting router with extra route"""
         # Create different cidr for each subnet to avoid cidr duplicate
         # The cidr starts from project_cidr
         next_cidr = self.cidr
@@ -248,6 +220,7 @@
 
     @decorators.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
     def test_update_router_admin_state(self):
+        """Test updating router's admin state"""
         router = self.create_router()
         self.addCleanup(self.delete_router, router)
         self.assertFalse(router['admin_state_up'])
@@ -261,6 +234,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('802c73c9-c937-4cef-824b-2191e24a6aab')
     def test_add_multiple_router_interfaces(self):
+        """Test adding multiple router interfaces"""
         network_name = data_utils.rand_name(self.__class__.__name__)
         network01 = self.networks_client.create_network(
             name=network_name)['network']
@@ -291,6 +265,7 @@
 
     @decorators.idempotent_id('96522edf-b4b5-45d9-8443-fa11c26e6eff')
     def test_router_interface_port_update_with_fixed_ip(self):
+        """Test updating router interface port's fixed ip"""
         network_name = data_utils.rand_name(self.__class__.__name__)
         network = self.networks_client.create_network(
             name=network_name)['network']
diff --git a/tempest/api/network/test_routers_negative.py b/tempest/api/network/test_routers_negative.py
index 0b61860..10a2706 100644
--- a/tempest/api/network/test_routers_negative.py
+++ b/tempest/api/network/test_routers_negative.py
@@ -21,6 +21,7 @@
 
 
 class RoutersNegativeTest(base.BaseNetworkTest):
+    """Negative tests of routers"""
 
     @classmethod
     def skip_checks(cls):
@@ -39,6 +40,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
     def test_router_add_gateway_invalid_network_returns_404(self):
+        """Test adding gateway with invalid network for router"""
         self.assertRaises(lib_exc.NotFound,
                           self.routers_client.update_router,
                           self.router['id'],
@@ -48,6 +50,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
     def test_router_add_gateway_net_not_external_returns_400(self):
+        """Test adding gateway with not external network for router"""
         alt_network = self.create_network()
         sub_cidr = self.cidr.next()
         self.create_subnet(alt_network, cidr=sub_cidr)
@@ -60,6 +63,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('957751a3-3c68-4fa2-93b6-eb52ea10db6e')
     def test_add_router_interfaces_on_overlapping_subnets_returns_400(self):
+        """Test adding router interface which is on overlapping subnets"""
         network01 = self.create_network(
             network_name=data_utils.rand_name('router-network01-'))
         network02 = self.create_network(
@@ -79,6 +83,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('04df80f9-224d-47f5-837a-bf23e33d1c20')
     def test_router_remove_interface_in_use_returns_409(self):
+        """Test removing in-use interface from router"""
         self.routers_client.add_router_interface(self.router['id'],
                                                  subnet_id=self.subnet['id'])
         self.addCleanup(self.routers_client.remove_router_interface,
@@ -90,6 +95,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c2a70d72-8826-43a7-8208-0209e6360c47')
     def test_show_non_existent_router_returns_404(self):
+        """Test showing non existent router"""
         router = data_utils.rand_name('non_exist_router')
         self.assertRaises(lib_exc.NotFound, self.routers_client.show_router,
                           router)
@@ -97,6 +103,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b23d1569-8b0c-4169-8d4b-6abd34fad5c7')
     def test_update_non_existent_router_returns_404(self):
+        """Test updating non existent router"""
         router = data_utils.rand_name('non_exist_router')
         self.assertRaises(lib_exc.NotFound, self.routers_client.update_router,
                           router, name="new_name")
@@ -104,6 +111,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4')
     def test_delete_non_existent_router_returns_404(self):
+        """Test deleting non existent router"""
         router = data_utils.rand_name('non_exist_router')
         self.assertRaises(lib_exc.NotFound, self.routers_client.delete_router,
                           router)
@@ -114,6 +122,7 @@
 
 
 class DvrRoutersNegativeTest(base.BaseNetworkTest):
+    """Negative tests of DVR router"""
 
     @classmethod
     def skip_checks(cls):
@@ -125,5 +134,6 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4990b055-8fc7-48ab-bba7-aa28beaad0b9')
     def test_router_create_tenant_distributed_returns_forbidden(self):
+        """Non admin user is not allowed to create distributed router"""
         self.assertRaises(lib_exc.Forbidden, self.create_router,
                           distributed=True)
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index ef19122..d75acfc 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -21,6 +21,7 @@
 
 
 class SecGroupTest(base.BaseSecGroupTest):
+    """Test security groups"""
 
     @classmethod
     def skip_checks(cls):
@@ -67,7 +68,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('e30abd17-fef9-4739-8617-dc26da88e686')
     def test_list_security_groups(self):
-        # Verify the security group belonging to project exist in list
+        """Verify that default security group exist"""
         body = self.security_groups_client.list_security_groups()
         security_groups = body['security_groups']
         found = None
@@ -80,6 +81,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('bfd128e5-3c92-44b6-9d66-7fe29d22c802')
     def test_create_list_update_show_delete_security_group(self):
+        """Verify create/list/update/show/delete of security group"""
         group_create_body, _ = self._create_security_group()
 
         # List security groups and verify if created group is there in response
@@ -111,6 +113,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('cfb99e0e-7410-4a3d-8a0c-959a63ee77e9')
     def test_create_show_delete_security_group_rule(self):
+        """Test create/show/delete of security group rule"""
         group_create_body, _ = self._create_security_group()
 
         # Create rules for each protocol
@@ -191,7 +194,7 @@
 
     @decorators.idempotent_id('c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b')
     def test_create_security_group_rule_with_remote_group_id(self):
-        # Verify creating security group rule with remote_group_id works
+        """Verify creating security group rule with remote_group_id works"""
         sg1_body, _ = self._create_security_group()
         sg2_body, _ = self._create_security_group()
 
@@ -209,7 +212,7 @@
 
     @decorators.idempotent_id('16459776-5da2-4634-bce4-4b55ee3ec188')
     def test_create_security_group_rule_with_remote_ip_prefix(self):
-        # Verify creating security group rule with remote_ip_prefix works
+        """Verify creating security group rule with remote_ip_prefix works"""
         sg1_body, _ = self._create_security_group()
 
         sg_id = sg1_body['security_group']['id']
@@ -226,9 +229,10 @@
 
     @decorators.idempotent_id('0a307599-6655-4220-bebc-fd70c64f2290')
     def test_create_security_group_rule_with_protocol_integer_value(self):
-        # Verify creating security group rule with the
-        # protocol as integer value
-        # arguments : "protocol": 17
+        """Verify creating security group rule with the integer protocol value
+
+        arguments : "protocol": 17
+        """
         group_create_body, _ = self._create_security_group()
         direction = 'ingress'
         protocol = 17
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index d054865..beaeb20 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -24,6 +24,7 @@
 
 
 class NegativeSecGroupTest(base.BaseSecGroupTest):
+    """Negative tests of security groups"""
 
     @classmethod
     def skip_checks(cls):
@@ -35,6 +36,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('424fd5c3-9ddc-486a-b45f-39bf0c820fc6')
     def test_show_non_existent_security_group(self):
+        """Test showing non existent security group"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(
             lib_exc.NotFound, self.security_groups_client.show_security_group,
@@ -43,6 +45,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4c094c09-000b-4e41-8100-9617600c02a6')
     def test_show_non_existent_security_group_rule(self):
+        """Test showing non existent security group rule"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(
             lib_exc.NotFound,
@@ -52,6 +55,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1f1bb89d-5664-4956-9fcd-83ee0fa603df')
     def test_delete_non_existent_security_group(self):
+        """Test deleting non existent security group"""
         non_exist_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.security_groups_client.delete_security_group,
@@ -61,6 +65,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('981bdc22-ce48-41ed-900a-73148b583958')
     def test_create_security_group_rule_with_bad_protocol(self):
+        """Test creating security group rule with bad protocol"""
         group_create_body, _ = self._create_security_group()
 
         # Create rule with bad protocol name
@@ -74,6 +79,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5f8daf69-3c5f-4aaa-88c9-db1d66f68679')
     def test_create_security_group_rule_with_bad_remote_ip_prefix(self):
+        """Test creating security group rule with bad remote ip prefix"""
         group_create_body, _ = self._create_security_group()
 
         # Create rule with bad remote_ip_prefix
@@ -89,6 +95,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4bf786fd-2f02-443c-9716-5b98e159a49a')
     def test_create_security_group_rule_with_non_existent_remote_groupid(self):
+        """Creating security group rule with non existent remote group id"""
         group_create_body, _ = self._create_security_group()
         non_exist_id = data_utils.rand_uuid()
 
@@ -105,6 +112,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b5c4b247-6b02-435b-b088-d10d45650881')
     def test_create_security_group_rule_with_remote_ip_and_group(self):
+        """Test creating security group rule with remote ip and group"""
         sg1_body, _ = self._create_security_group()
         sg2_body, _ = self._create_security_group()
 
@@ -121,6 +129,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5666968c-fff3-40d6-9efc-df1c8bd01abb')
     def test_create_security_group_rule_with_bad_ethertype(self):
+        """Test creating security group rule with bad bad ethertype"""
         group_create_body, _ = self._create_security_group()
 
         # Create rule with bad ethertype
@@ -134,6 +143,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d')
     def test_create_security_group_rule_with_invalid_ports(self):
+        """Test creating security group rule with invalid ports"""
         group_create_body, _ = self._create_security_group()
 
         # Create rule for tcp protocol with invalid ports
@@ -168,7 +178,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('2323061e-9fbf-4eb0-b547-7e8fafc90849')
     def test_create_additional_default_security_group_fails(self):
-        # Create security group named 'default', it should be failed.
+        """Test creating additional default security group
+
+        Create security group named 'default', it should fail.
+        """
         name = 'default'
         self.assertRaises(lib_exc.Conflict,
                           self.security_groups_client.create_security_group,
@@ -177,7 +190,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('966e2b96-023a-11e7-a9e4-fa163e4fa634')
     def test_create_security_group_update_name_default(self):
-        # Update security group name to 'default', it should be failed.
+        """Test updating security group's name to default
+
+        Update security group name to 'default', it should fail.
+        """
         group_create_body, _ = self._create_security_group()
         self.assertRaises(lib_exc.Conflict,
                           self.security_groups_client.update_security_group,
@@ -187,7 +203,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8fde898f-ce88-493b-adc9-4e4692879fc5')
     def test_create_duplicate_security_group_rule_fails(self):
-        # Create duplicate security group rule, it should fail.
+        """Test creating duplicate security group rule
+
+        Create duplicate security group rule, it should fail.
+        """
         body, _ = self._create_security_group()
 
         min_port = 66
@@ -213,7 +232,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('be308db6-a7cf-4d5c-9baf-71bafd73f35e')
     def test_create_security_group_rule_with_non_existent_security_group(self):
-        # Create security group rules with not existing security group.
+        """Creating security group rules with not existing security group"""
         non_existent_sg = data_utils.rand_uuid()
         self.assertRaises(
             lib_exc.NotFound,
@@ -228,6 +247,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7607439c-af73-499e-bf64-f687fd12a842')
     def test_create_security_group_rule_wrong_ip_prefix_version(self):
+        """Test creating security group rule with wrong ip prefix version"""
         group_create_body, _ = self._create_security_group()
 
         # Create rule with bad remote_ip_prefix
diff --git a/tempest/api/network/test_service_providers.py b/tempest/api/network/test_service_providers.py
index 9ebcd89..5af5244 100644
--- a/tempest/api/network/test_service_providers.py
+++ b/tempest/api/network/test_service_providers.py
@@ -18,12 +18,14 @@
 
 
 class ServiceProvidersTest(base.BaseNetworkTest):
+    """Test network service providers"""
 
     @decorators.idempotent_id('2cbbeea9-f010-40f6-8df5-4eaa0c918ea6')
     @testtools.skipUnless(
         utils.is_extension_enabled('service-type', 'network'),
         'service-type extension not enabled.')
     def test_service_providers_list(self):
+        """Test listing network service providers"""
         body = self.service_providers_client.list_service_providers()
         self.assertIn('service_providers', body)
         self.assertIsInstance(body['service_providers'], list)
diff --git a/tempest/api/network/test_subnetpools_extensions.py b/tempest/api/network/test_subnetpools_extensions.py
index bfc2609..48603ed 100644
--- a/tempest/api/network/test_subnetpools_extensions.py
+++ b/tempest/api/network/test_subnetpools_extensions.py
@@ -49,6 +49,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e9811')
     def test_create_list_show_update_delete_subnetpools(self):
+        """Test create/list/show/update/delete of subnet pools"""
         subnetpool_name = data_utils.rand_name('subnetpools')
         # create subnet pool
         prefix = CONF.network.default_network
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index 85f6896..5219c34 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -51,7 +51,7 @@
 
     @decorators.idempotent_id('ee76bfaf-ac94-4d74-9ecc-4bbd4c583cb1')
     def test_create_list_show_update_delete_tags(self):
-        # Validate that creating a tag on a network resource works.
+        """Validate that creating a tag on a network resource works"""
         tag_name = data_utils.rand_name(self.__class__.__name__ + '-Tag')
         self.tags_client.create_tag('networks', self.network['id'], tag_name)
         self.addCleanup(self.tags_client.delete_all_tags, 'networks',
@@ -103,9 +103,10 @@
         List tags.
         Remove a tag.
 
-    v2.0 of the Neutron API is assumed. The tag-ext extension allows users to
-    set tags on the following resources: subnets, ports, routers and
-    subnetpools.
+    v2.0 of the Neutron API is assumed. The tag-ext or standard-attr-tag
+    extension allows users to set tags on the following resources: subnets,
+    ports, routers and subnetpools.
+    From the Stein release, tag-ext has been renamed to standard-attr-tag.
     """
 
     # NOTE(felipemonteiro): The supported resource names are plural. Use
@@ -115,8 +116,12 @@
     @classmethod
     def skip_checks(cls):
         super(TagsExtTest, cls).skip_checks()
-        if not utils.is_extension_enabled('tag-ext', 'network'):
-            msg = "tag-ext extension not enabled."
+        # Added condition to support backward compatibility since
+        # tag-ext has been renamed to standard-attr-tag
+        if not (utils.is_extension_enabled('tag-ext', 'network') or
+                utils.is_extension_enabled('standard-attr-tag', 'network')):
+            msg = ("neither tag-ext nor standard-attr-tag extensions "
+                   "are enabled.")
             raise cls.skipException(msg)
 
     @classmethod
@@ -153,6 +158,7 @@
 
     @decorators.idempotent_id('c6231efa-9a89-4adf-b050-2a3156b8a1d9')
     def test_create_check_list_and_delete_tags(self):
+        """Test tag operations on subnets/ports/routers/subnetpools"""
         tag_names = self._create_tags_for_each_resource()
 
         for i, resource in enumerate(self.SUPPORTED_RESOURCES):
@@ -176,6 +182,7 @@
 
     @decorators.idempotent_id('663a90f5-f334-4b44-afe0-c5fc1d408791')
     def test_update_and_delete_all_tags(self):
+        """Test update/delete all tags on subnets/ports/routers/subnetpools"""
         self._create_tags_for_each_resource()
 
         for resource in self.SUPPORTED_RESOURCES:
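
The backward-compatible skip check above accepts either extension name rather than only the old one. A minimal sketch of the same either-of check factored into a helper (the helper name and callback parameter are hypothetical, not Tempest API):

def any_extension_enabled(names, service, is_enabled):
    # Hypothetical helper: True if any of the candidate extension names
    # (e.g. 'tag-ext' or its Stein rename 'standard-attr-tag') is
    # reported as enabled for the given service by the is_enabled callback.
    return any(is_enabled(name, service) for name in names)
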
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index e8f3f8b..8d8039b 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -18,7 +18,6 @@
 from tempest.common import custom_matchers
 from tempest import config
 from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
 from tempest.lib import exceptions as lib_exc
 import tempest.test
 
@@ -30,11 +29,6 @@
 
     The containers should be visible from the container_client given.
     Will not throw any error if the containers don't exist.
-    Will not check that object and container deletions succeed.
-    After delete all the objects from a container, it will wait 2
-    seconds before delete the container itself, in order to deployments
-    using HA proxy sync the deletion properly, otherwise, the container
-    might fail to be deleted because it's not empty.
 
     :param containers: List of containers(or string of a container)
                        to be deleted
@@ -50,12 +44,11 @@
             _, objlist = container_client.list_container_objects(cont, params)
             # delete every object in the container
             for obj in objlist:
-                test_utils.call_and_ignore_notfound_exc(
-                    object_client.delete_object, cont, obj['name'])
-            # sleep 2 seconds to sync the deletion of the objects
-            # in HA deployment
-            time.sleep(2)
+                object_client.delete_object(cont, obj['name'])
+                object_client.wait_for_resource_deletion(obj['name'], cont)
+            # Verify resource deletion
             container_client.delete_container(cont)
+            container_client.wait_for_resource_deletion(cont)
         except lib_exc.NotFound:
             pass
 
@@ -123,12 +116,20 @@
             object_name = data_utils.rand_name(name='TestObject')
         if data is None:
             data = data_utils.random_bytes()
-        cls.object_client.create_object(container_name,
-                                        object_name,
-                                        data,
-                                        metadata=metadata)
 
-        return object_name, data
+        err = Exception()
+        for _ in range(5):
+            try:
+                cls.object_client.create_object(container_name,
+                                                object_name,
+                                                data,
+                                                metadata=metadata)
+                return object_name, data
+            # after container creation we might see Conflict
+            except lib_exc.Conflict as e:
+                err = e
+                time.sleep(2)
+        raise err
 
     @classmethod
     def delete_containers(cls, container_client=None, object_client=None):
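
The create-object change above wraps the call in a small retry loop: up to five attempts, sleeping two seconds between them, and re-raising the last Conflict if none succeed. A standalone sketch of the same pattern (the helper name, defaults, and the generic exception type are assumptions for illustration, not Tempest API):

import time


def retry_on_conflict(func, attempts=5, delay=2, conflict_exc=Exception):
    # Hypothetical helper mirroring the retry loop above: call func(),
    # and on a "conflict" style exception sleep and retry a fixed number
    # of times, re-raising the last error if every attempt fails.
    err = conflict_exc()
    for _ in range(attempts):
        try:
            return func()
        except conflict_exc as e:
            err = e
            time.sleep(delay)
    raise err

In the hunk itself the exception caught is lib_exc.Conflict and the callable is the object client's create_object call.
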
diff --git a/tempest/api/object_storage/test_account_bulk.py b/tempest/api/object_storage/test_account_bulk.py
index 6599e43..687fe57 100644
--- a/tempest/api/object_storage/test_account_bulk.py
+++ b/tempest/api/object_storage/test_account_bulk.py
@@ -16,12 +16,12 @@
 import tempfile
 
 from tempest.api.object_storage import base
-from tempest.common import custom_matchers
 from tempest.common import utils
 from tempest.lib import decorators
 
 
 class BulkTest(base.BaseObjectTest):
+    """Test bulk operation of archived file"""
 
     def setUp(self):
         super(BulkTest, self).setUp()
@@ -71,22 +71,12 @@
     @decorators.idempotent_id('a407de51-1983-47cc-9f14-47c2b059413c')
     @utils.requires_ext(extension='bulk_upload', service='object')
     def test_extract_archive(self):
-        # Test bulk operation of file upload with an archived file
+        """Test bulk operation of file upload with an archived file"""
         filepath, container_name, object_name = self._create_archive()
         resp = self._upload_archive(filepath)
         self.containers.append(container_name)
 
-        # When uploading an archived file with the bulk operation, the response
-        # does not contain 'content-length' header. This is the special case,
-        # therefore the existence of response headers is checked without
-        # custom matcher.
-        self.assertIn('transfer-encoding', resp.response)
-        self.assertIn('content-type', resp.response)
-        self.assertIn('x-trans-id', resp.response)
-        self.assertIn('date', resp.response)
-
-        # Check only the format of common headers with custom matcher
-        self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
+        self.assertHeaders(resp.response, 'Account', 'PUT')
 
         param = {'format': 'json'}
         resp, body = self.account_client.list_account_containers(param)
@@ -106,24 +96,14 @@
     @decorators.idempotent_id('c075e682-0d2a-43b2-808d-4116200d736d')
     @utils.requires_ext(extension='bulk_delete', service='object')
     def test_bulk_delete(self):
-        # Test bulk operation of deleting multiple files
+        """Test bulk operation of deleting multiple files"""
         filepath, container_name, object_name = self._create_archive()
         self._upload_archive(filepath)
 
         data = '%s/%s\n%s' % (container_name, object_name, container_name)
         resp = self.bulk_client.delete_bulk_data(data=data)
 
-        # When deleting multiple files using the bulk operation, the response
-        # does not contain 'content-length' header. This is the special case,
-        # therefore the existence of response headers is checked without
-        # custom matcher.
-        self.assertIn('transfer-encoding', resp.response)
-        self.assertIn('content-type', resp.response)
-        self.assertIn('x-trans-id', resp.response)
-        self.assertIn('date', resp.response)
-
-        # Check only the format of common headers with custom matcher
-        self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
+        self.assertHeaders(resp.response, 'Account', 'DELETE')
 
         # Check if uploaded contents are completely deleted
         self._check_contents_deleted(container_name)
@@ -131,7 +111,7 @@
     @decorators.idempotent_id('dbea2bcb-efbb-4674-ac8a-a5a0e33d1d79')
     @utils.requires_ext(extension='bulk_delete', service='object')
     def test_bulk_delete_by_POST(self):
-        # Test bulk operation of deleting multiple files
+        """Test bulk operation of deleting multiple files by HTTP POST"""
         filepath, container_name, object_name = self._create_archive()
         self._upload_archive(filepath)
 
@@ -139,17 +119,7 @@
 
         resp = self.bulk_client.delete_bulk_data_with_post(data=data)
 
-        # When deleting multiple files using the bulk operation, the response
-        # does not contain 'content-length' header. This is the special case,
-        # therefore the existence of response headers is checked without
-        # custom matcher.
-        self.assertIn('transfer-encoding', resp.response)
-        self.assertIn('content-type', resp.response)
-        self.assertIn('x-trans-id', resp.response)
-        self.assertIn('date', resp.response)
-
-        # Check only the format of common headers with custom matcher
-        self.assertThat(resp.response, custom_matchers.AreAllWellFormatted())
+        self.assertHeaders(resp.response, 'Account', 'POST')
 
         # Check if uploaded contents are completely deleted
         self._check_contents_deleted(container_name)
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index 48f42ec..6854bbe 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -22,6 +22,7 @@
 
 
 class AccountQuotasTest(base.BaseObjectTest):
+    """Test account quotas"""
 
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['reseller', CONF.object_storage.reseller_admin_role]]
@@ -79,6 +80,7 @@
     @decorators.idempotent_id('a22ef352-a342-4587-8f47-3bbdb5b039c4')
     @utils.requires_ext(extension='account_quotas', service='object')
     def test_upload_valid_object(self):
+        """Test uploading valid object"""
         object_name = data_utils.rand_name(name="TestObject")
         data = data_utils.arbitrary_string()
         resp, _ = self.object_client.create_object(self.container_name,
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index c5c30e3..4966ec4 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -14,8 +14,6 @@
 #    under the License.
 
 import random
-
-import six
 import testtools
 
 from tempest.api.object_storage import base
@@ -28,6 +26,7 @@
 
 
 class AccountTest(base.BaseObjectTest):
+    """Test account metadata and containers"""
 
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
@@ -42,7 +41,7 @@
     def resource_setup(cls):
         super(AccountTest, cls).resource_setup()
         for i in range(ord('a'), ord('f') + 1):
-            name = data_utils.rand_name(name='%s-' % six.int2byte(i))
+            name = data_utils.rand_name(name='%s-' % chr(i))
             cls.container_client.update_container(name)
             cls.addClassResourceCleanup(base.delete_containers,
                                         [name],
@@ -54,23 +53,22 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('3499406a-ae53-4f8c-b43a-133d4dc6fe3f')
     def test_list_containers(self):
-        # list of all containers should not be empty
+        """Test listing containers"""
         resp, container_list = self.account_client.list_account_containers()
         self.assertHeaders(resp, 'Account', 'GET')
 
         self.assertIsNotNone(container_list)
 
         for container_name in self.containers:
-            self.assertIn(six.text_type(container_name).encode('utf-8'),
+            self.assertIn(str(container_name).encode('utf-8'),
                           container_list)
 
     @decorators.idempotent_id('884ec421-fbad-4fcc-916b-0580f2699565')
     def test_list_no_containers(self):
-        # List request to empty account
+        """Test listing containers for an account without container"""
 
         # To test listing no containers, create new user other than
         # the base user of this instance.
-
         resp, container_list = \
             self.os_operator.account_client.list_account_containers()
 
@@ -103,7 +101,7 @@
 
     @decorators.idempotent_id('1c7efa35-e8a2-4b0b-b5ff-862c7fd83704')
     def test_list_containers_with_format_json(self):
-        # list containers setting format parameter to 'json'
+        """Test listing containers setting format parameter to 'json'"""
         params = {'format': 'json'}
         resp, container_list = self.account_client.list_account_containers(
             params=params)
@@ -115,7 +113,7 @@
 
     @decorators.idempotent_id('4477b609-1ca6-4d4b-b25d-ad3f01086089')
     def test_list_containers_with_format_xml(self):
-        # list containers setting format parameter to 'xml'
+        """Test listing containers setting format parameter to 'xml'"""
         params = {'format': 'xml'}
         resp, container_list = self.account_client.list_account_containers(
             params=params)
@@ -133,13 +131,18 @@
         not CONF.object_storage_feature_enabled.discoverability,
         'Discoverability function is disabled')
     def test_list_extensions(self):
+        """Test listing capabilities"""
         resp = self.capabilities_client.list_capabilities()
 
         self.assertThat(resp, custom_matchers.AreAllWellFormatted())
 
     @decorators.idempotent_id('5cfa4ab2-4373-48dd-a41f-a532b12b08b2')
     def test_list_containers_with_limit(self):
-        # list containers one of them, half of them then all of them
+        """Test listing containers with limit parameter
+
+        Listing containers limited to one of them, half of them, and then all
+        of them.
+        """
         for limit in (1, self.containers_count // 2,
                       self.containers_count):
             params = {'limit': limit}
@@ -151,10 +154,11 @@
 
     @decorators.idempotent_id('638f876d-6a43-482a-bbb3-0840bca101c6')
     def test_list_containers_with_marker(self):
-        # list containers using marker param
-        # first expect to get 0 container as we specified last
-        # the container as marker
-        # second expect to get the bottom half of the containers
+        """Test listing containers with marker parameter
+
+        First expect to get 0 containers as we specified the last container
+        as marker, second expect to get the bottom half of the containers.
+        """
         params = {'marker': self.containers[-1]}
         resp, container_list = \
             self.account_client.list_account_containers(params=params)
@@ -172,10 +176,11 @@
 
     @decorators.idempotent_id('5ca164e4-7bde-43fa-bafb-913b53b9e786')
     def test_list_containers_with_end_marker(self):
-        # list containers using end_marker param
-        # first expect to get 0 container as we specified first container as
-        # end_marker
-        # second expect to get the top half of the containers
+        """Test listing containers with end_marker parameter
+
+        First expect to get 0 containers as the first container is set as
+        end_marker; second, expect to get the top half of the containers.
+        """
         params = {'end_marker': self.containers[0]}
         resp, container_list = \
             self.account_client.list_account_containers(params=params)
@@ -190,7 +195,12 @@
 
     @decorators.idempotent_id('ac8502c2-d4e4-4f68-85a6-40befea2ef5e')
     def test_list_containers_with_marker_and_end_marker(self):
-        # list containers combining marker and end_marker param
+        """Test listing containers with marker and end_marker parameter
+
+        If we use the first container as marker, and the last container as
+        end_marker, then we should get all containers excluding the first one
+        and the last one.
+        """
         params = {'marker': self.containers[0],
                   'end_marker': self.containers[self.containers_count - 1]}
         resp, container_list = self.account_client.list_account_containers(
@@ -200,8 +210,10 @@
 
     @decorators.idempotent_id('f7064ae8-dbcc-48da-b594-82feef6ea5af')
     def test_list_containers_with_limit_and_marker(self):
-        # list containers combining marker and limit param
-        # result are always limitated by the limit whatever the marker
+        """Test listing containers combining marker and limit parameter
+
+        Results are always limited by the limit, whatever the marker.
+        """
         for marker in random.choice(self.containers):
             limit = random.randint(0, self.containers_count - 1)
             params = {'marker': marker,
@@ -215,6 +227,10 @@
 
     @decorators.idempotent_id('888a3f0e-7214-4806-8e50-5e0c9a69bb5e')
     def test_list_containers_with_limit_and_end_marker(self):
+        """Test listing containers combining end_marker and limit parameter
+
+        Results are always limited by the limit, whatever the end_marker.
+        """
         # list containers combining limit and end_marker param
         limit = random.randint(1, self.containers_count)
         params = {'limit': limit,
@@ -227,7 +243,11 @@
 
     @decorators.idempotent_id('8cf98d9c-e3a0-4e44-971b-c87656fdddbd')
     def test_list_containers_with_limit_and_marker_and_end_marker(self):
-        # list containers combining limit, marker and end_marker param
+        """Test listing containers combining marker and end_marker and limit
+
+        Results are always limited by the limit, whatever the marker and the
+        end_marker.
+        """
         limit = random.randint(1, self.containers_count)
         params = {'limit': limit,
                   'marker': self.containers[0],
@@ -240,7 +260,7 @@
 
     @decorators.idempotent_id('365e6fc7-1cfe-463b-a37c-8bd08d47b6aa')
     def test_list_containers_with_prefix(self):
-        # list containers that have a name that starts with a prefix
+        """Test listing containers that have a name starting with a prefix"""
         prefix = 'tempest-a'
         params = {'prefix': prefix}
         resp, container_list = self.account_client.list_account_containers(
@@ -252,7 +272,7 @@
 
     @decorators.idempotent_id('b1811cff-d1ed-4c15-a52e-efd8de41cf34')
     def test_list_containers_reverse_order(self):
-        # list containers in reverse order
+        """Test listing containers in reverse order"""
         _, orig_container_list = self.account_client.list_account_containers()
 
         params = {'reverse': True}
@@ -265,8 +285,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('4894c312-6056-4587-8d6f-86ffbf861f80')
     def test_list_account_metadata(self):
-        # list all account metadata
-
+        """Test listing account metadata"""
         # set metadata to account
         metadata = {'test-account-meta1': 'Meta1',
                     'test-account-meta2': 'Meta2'}
@@ -282,14 +301,14 @@
 
     @decorators.idempotent_id('b904c2e3-24c2-4dba-ad7d-04e90a761be5')
     def test_list_no_account_metadata(self):
-        # list no account metadata
+        """Test listing account metadata for account without metadata"""
         resp, _ = self.account_client.list_account_metadata()
         self.assertHeaders(resp, 'Account', 'HEAD')
         self.assertNotIn('x-account-meta-', str(resp))
 
     @decorators.idempotent_id('e2a08b5f-3115-4768-a3ee-d4287acd6c08')
     def test_update_account_metadata_with_create_metadata(self):
-        # add metadata to account
+        """Test adding metadata to account"""
         metadata = {'test-account-meta1': 'Meta1'}
         resp, _ = self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata)
@@ -305,7 +324,7 @@
 
     @decorators.idempotent_id('9f60348d-c46f-4465-ae06-d51dbd470953')
     def test_update_account_metadata_with_delete_metadata(self):
-        # delete metadata from account
+        """Test deleting metadata from account"""
         metadata = {'test-account-meta1': 'Meta1'}
         self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata)
@@ -318,8 +337,11 @@
 
     @decorators.idempotent_id('64fd53f3-adbd-4639-af54-436e4982dbfb')
     def test_update_account_metadata_with_create_metadata_key(self):
-        # if the value of metadata is not set, the metadata is not
-        # registered at a server
+        """Test adding metadata to account with empty value
+
+        When adding metadata with an empty value to the account, the
+        metadata is not registered.
+        """
         metadata = {'test-account-meta1': ''}
         resp, _ = self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata)
@@ -330,8 +352,11 @@
 
     @decorators.idempotent_id('d4d884d3-4696-4b85-bc98-4f57c4dd2bf1')
     def test_update_account_metadata_with_delete_metadata_key(self):
-        # Although the value of metadata is not set, the feature of
-        # deleting metadata is valid
+        """Test deleting metadata from account with empty value
+
+        Although the value of metadata is not set, the feature of deleting
+        metadata is valid, so the metadata is removed from the account.
+        """
         metadata_1 = {'test-account-meta1': 'Meta1'}
         self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata_1)
@@ -345,7 +370,11 @@
 
     @decorators.idempotent_id('8e5fc073-59b9-42ee-984a-29ed11b2c749')
     def test_update_account_metadata_with_create_and_delete_metadata(self):
-        # Send a request adding and deleting metadata requests simultaneously
+        """Test adding and deleting metadata simultaneously
+
+        Send a single request that adds and deletes metadata simultaneously;
+        both the adding and the deleting of metadata will succeed.
+        """
         metadata_1 = {'test-account-meta1': 'Meta1'}
         self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata_1)
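The docstring above describes folding metadata creation and deletion into one POST. A hedged sketch of that call as a free-standing helper; only create_update_metadata appears verbatim in this diff, so the delete_metadata keyword and the metadata keys are assumptions used for illustration:

    def swap_account_metadata(account_client):
        # Hypothetical helper, not Tempest code: add one key and delete
        # another in the same request. delete_metadata is assumed from the
        # client method's naming; the keys are illustrative.
        resp, _ = account_client.create_update_or_delete_account_metadata(
            create_update_metadata={'test-account-meta2': 'Meta2'},
            delete_metadata={'test-account-meta1': 'Meta1'})
        return resp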
diff --git a/tempest/api/object_storage/test_account_services_negative.py b/tempest/api/object_storage/test_account_services_negative.py
index 3e664d7..8d2a501 100644
--- a/tempest/api/object_storage/test_account_services_negative.py
+++ b/tempest/api/object_storage/test_account_services_negative.py
@@ -21,6 +21,7 @@
 
 
 class AccountNegativeTest(base.BaseObjectTest):
+    """Negative tests of account"""
 
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
@@ -33,7 +34,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('070e6aca-6152-4867-868d-1118d68fb38c')
     def test_list_containers_with_non_authorized_user(self):
-        # list containers using non-authorized user
+        """Test listing containers using non-authorized user"""
 
         test_auth_provider = self.os_operator.auth_provider
         # Get auth for the test user
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index e9ca0b1..0259373 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -22,6 +22,7 @@
 
 
 class ObjectTestACLs(base.BaseObjectTest):
+    """Test object ACLs"""
 
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
@@ -30,13 +31,14 @@
         super(ObjectTestACLs, self).setUp()
         self.container_name = self.create_container()
 
-    def tearDown(self):
-        self.delete_containers()
-        super(ObjectTestACLs, self).tearDown()
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ObjectTestACLs, cls).resource_cleanup()
 
     @decorators.idempotent_id('a3270f3f-7640-4944-8448-c7ea783ea5b6')
     def test_read_object_with_rights(self):
-        # attempt to read object using authorized user
+        """Test reading object using authorized user"""
         # update X-Container-Read metadata ACL
         tenant_id = self.os_roles_operator_alt.credentials.tenant_id
         user_id = self.os_roles_operator_alt.credentials.user_id
@@ -64,7 +66,7 @@
 
     @decorators.idempotent_id('aa58bfa5-40d9-4bc3-82b4-d07f4a9e392a')
     def test_write_object_with_rights(self):
-        # attempt to write object using authorized user
+        """Test writing object using authorized user"""
         # update X-Container-Write metadata ACL
         tenant_id = self.os_roles_operator_alt.credentials.tenant_id
         user_id = self.os_roles_operator_alt.credentials.user_id
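The ACL tests above grant the alternate user access by writing a 'tenant:user' string into the container's X-Container-Read or X-Container-Write metadata; the negative tests later use 'badtenant:baduser' in the same position. A minimal sketch of the header shape (the IDs are made up):

    # Illustrative values only; the 'tenant:user' format mirrors the
    # 'badtenant:baduser' strings used by the negative ACL tests.
    tenant_id = '6f70c4e7b1a84f0e9a2b9c3d4e5f6a7b'
    user_id = '0a1b2c3d4e5f60718293a4b5c6d7e8f9'

    read_acl = {'X-Container-Read': '%s:%s' % (tenant_id, user_id)}
    write_acl = {'X-Container-Write': '%s:%s' % (tenant_id, user_id)}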
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 90b24b4..85e6ddb 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -22,6 +22,7 @@
 
 
 class ObjectACLsNegativeTest(base.BaseObjectTest):
+    """Negative tests of object ACLs"""
 
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
@@ -41,13 +42,15 @@
         self.container_name = data_utils.rand_name(name='TestContainer')
         self.container_client.update_container(self.container_name)
 
-    def tearDown(self):
-        self.delete_containers([self.container_name])
-        super(ObjectACLsNegativeTest, self).tearDown()
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ObjectACLsNegativeTest, cls).resource_cleanup()
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('af587587-0c24-4e15-9822-8352ce711013')
     def test_write_object_without_using_creds(self):
+        """Test writing object without using credentials"""
         # trying to create object with empty headers
         # X-Auth-Token is not provided
         object_name = data_utils.rand_name(name='Object')
@@ -62,6 +65,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('af85af0b-a025-4e72-a90e-121babf55720')
     def test_delete_object_without_using_creds(self):
+        """Test deleting object without using credentials"""
         # create object
         object_name = data_utils.rand_name(name='Object')
         self.object_client.create_object(self.container_name, object_name,
@@ -79,7 +83,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('63d84e37-55a6-42e2-9e5f-276e60e26a00')
     def test_write_object_with_non_authorized_user(self):
-        # attempt to upload another file using non-authorized user
+        """Test writing object with non-authorized user"""
         # User provided token is forbidden. ACL are not set
         object_name = data_utils.rand_name(name='Object')
         # trying to create object with non-authorized user
@@ -94,7 +98,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('abf63359-be52-4feb-87dd-447689fc77fd')
     def test_read_object_with_non_authorized_user(self):
-        # attempt to read object using non-authorized user
+        """Test reading object with non-authorized user"""
         # User provided token is forbidden. ACL are not set
         object_name = data_utils.rand_name(name='Object')
         resp, _ = self.object_client.create_object(
@@ -112,7 +116,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7343ac3d-cfed-4198-9bb0-00149741a492')
     def test_delete_object_with_non_authorized_user(self):
-        # attempt to delete object using non-authorized user
+        """Test deleting object with non-authorized user"""
         # User provided token is forbidden. ACL are not set
         object_name = data_utils.rand_name(name='Object')
         resp, _ = self.object_client.create_object(
@@ -130,7 +134,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9ed01334-01e9-41ea-87ea-e6f465582823')
     def test_read_object_without_rights(self):
-        # attempt to read object using non-authorized user
+        """Test reading object without rights"""
         # update X-Container-Read metadata ACL
         cont_headers = {'X-Container-Read': 'badtenant:baduser'}
         resp_meta, _ = (
@@ -155,7 +159,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a3a585a7-d8cf-4b65-a1a0-edc2b1204f85')
     def test_write_object_without_rights(self):
-        # attempt to write object using non-authorized user
+        """Test writing object without rights"""
         # update X-Container-Write metadata ACL
         cont_headers = {'X-Container-Write': 'badtenant:baduser'}
         resp_meta, _ = (
@@ -177,7 +181,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8ba512ad-aa6e-444e-b882-2906a0ea2052')
     def test_write_object_without_write_rights(self):
-        # attempt to write object using non-authorized user
+        """Test writing object without write rights"""
         # update X-Container-Read and X-Container-Write metadata ACL
         tenant_name = self.os_operator.credentials.tenant_name
         username = self.os_operator.credentials.username
@@ -203,7 +207,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b4e366f8-f185-47ab-b789-df4416f9ecdb')
     def test_delete_object_without_write_rights(self):
-        # attempt to delete object using non-authorized user
+        """Test deleting object without write rights"""
         # update X-Container-Read and X-Container-Write metadata ACL
         tenant_name = self.os_operator.credentials.tenant_name
         username = self.os_operator.credentials.username
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index fcd9a7c..7977a7a 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -44,10 +44,10 @@
         self.container_client.create_update_or_delete_container_metadata(
             self.container_name, create_update_metadata=metadata)
 
-    def tearDown(self):
-        """Cleans the container of any object after each test."""
-        self.delete_containers()
-        super(ContainerQuotasTest, self).tearDown()
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ContainerQuotasTest, cls).resource_cleanup()
 
     @decorators.idempotent_id('9a0fb034-86af-4df0-86fa-f8bd7db21ae0')
     @utils.requires_ext(extension='container_quotas', service='object')
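Several files in this change convert per-test tearDown() cleanup into a class-level resource_cleanup(), so containers are removed once when the class finishes rather than after every test method. A sketch of the pattern with placeholder classes (the real BaseObjectTest hooks are not reproduced here):

    class FakeBase(object):
        # Stand-in for the Tempest base class hooks.
        @classmethod
        def resource_cleanup(cls):
            pass

        @classmethod
        def delete_containers(cls):
            print('containers deleted once for the whole class')


    class QuotaStyleTest(FakeBase):
        # Before: tearDown() called self.delete_containers() after each test.
        # After: cleanup happens a single time, at class teardown.
        @classmethod
        def resource_cleanup(cls):
            cls.delete_containers()
            super(QuotaStyleTest, cls).resource_cleanup()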
diff --git a/tempest/api/object_storage/test_container_services.py b/tempest/api/object_storage/test_container_services.py
index cdc420e..085b8ab 100644
--- a/tempest/api/object_storage/test_container_services.py
+++ b/tempest/api/object_storage/test_container_services.py
@@ -19,13 +19,17 @@
 
 
 class ContainerTest(base.BaseObjectTest):
-    def tearDown(self):
-        self.delete_containers()
-        super(ContainerTest, self).tearDown()
+    """Test containers"""
+
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ContainerTest, cls).resource_cleanup()
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('92139d73-7819-4db1-85f8-3f2f22a8d91f')
     def test_create_container(self):
+        """Test creating container"""
         container_name = data_utils.rand_name(name='TestContainer')
         resp, _ = self.container_client.update_container(container_name)
         self.containers.append(container_name)
@@ -33,7 +37,7 @@
 
     @decorators.idempotent_id('49f866ed-d6af-4395-93e7-4187eb56d322')
     def test_create_container_overwrite(self):
-        # overwrite container with the same name
+        """Test overwriting container with the same name"""
         container_name = data_utils.rand_name(name='TestContainer')
         self.container_client.update_container(container_name)
         self.containers.append(container_name)
@@ -43,7 +47,7 @@
 
     @decorators.idempotent_id('c2ac4d59-d0f5-40d5-ba19-0635056d48cd')
     def test_create_container_with_metadata_key(self):
-        # create container with the blank value of metadata
+        """Test creating container with the blank value of metadata"""
         container_name = data_utils.rand_name(name='TestContainer')
         headers = {'X-Container-Meta-test-container-meta': ''}
         resp, _ = self.container_client.update_container(
@@ -60,7 +64,7 @@
 
     @decorators.idempotent_id('e1e8df32-7b22-44e1-aa08-ccfd8d446b58')
     def test_create_container_with_metadata_value(self):
-        # create container with metadata value
+        """Test creating container with metadata value"""
         container_name = data_utils.rand_name(name='TestContainer')
 
         # metadata name using underscores should be converted to hyphens
@@ -79,7 +83,7 @@
 
     @decorators.idempotent_id('24d16451-1c0c-4e4f-b59c-9840a3aba40e')
     def test_create_container_with_remove_metadata_key(self):
-        # create container with the blank value of remove metadata
+        """Test creating container with the blank value of remove metadata"""
         container_name = data_utils.rand_name(name='TestContainer')
         headers = {'X-Container-Meta-test-container-meta': 'Meta1'}
         self.container_client.update_container(container_name, **headers)
@@ -97,7 +101,7 @@
 
     @decorators.idempotent_id('8a21ebad-a5c7-4e29-b428-384edc8cd156')
     def test_create_container_with_remove_metadata_value(self):
-        # create container with remove metadata
+        """Test creating container with remove metadata"""
         container_name = data_utils.rand_name(name='TestContainer')
         headers = {'X-Container-Meta-test-container-meta': 'Meta1'}
         self.container_client.update_container(container_name, **headers)
@@ -114,6 +118,7 @@
 
     @decorators.idempotent_id('95d3a249-b702-4082-a2c4-14bb860cf06a')
     def test_delete_container(self):
+        """Test deleting container"""
         # create a container
         container_name = self.create_container()
         # delete container, success asserted within
@@ -123,7 +128,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('312ff6bd-5290-497f-bda1-7c5fec6697ab')
     def test_list_container_contents(self):
-        # get container contents list
+        """Test getting container contents list"""
         container_name = self.create_container()
         object_name, _ = self.create_object(container_name)
 
@@ -134,7 +139,7 @@
 
     @decorators.idempotent_id('4646ac2d-9bfb-4c7d-a3c5-0f527402b3df')
     def test_list_container_contents_with_no_object(self):
-        # get empty container contents list
+        """Test getting empty container contents list"""
         container_name = self.create_container()
 
         resp, object_list = self.container_client.list_container_objects(
@@ -144,7 +149,7 @@
 
     @decorators.idempotent_id('fe323a32-57b9-4704-a996-2e68f83b09bc')
     def test_list_container_contents_with_delimiter(self):
-        # get container contents list using delimiter param
+        """Test getting container contents list using delimiter param"""
         container_name = self.create_container()
         object_name = data_utils.rand_name(name='TestObject/')
         self.create_object(container_name, object_name)
@@ -158,7 +163,7 @@
 
     @decorators.idempotent_id('55b4fa5c-e12e-4ca9-8fcf-a79afe118522')
     def test_list_container_contents_with_end_marker(self):
-        # get container contents list using end_marker param
+        """Test getting container contents list using end_marker param"""
         container_name = self.create_container()
         object_name, _ = self.create_object(container_name)
 
@@ -171,7 +176,7 @@
 
     @decorators.idempotent_id('196f5034-6ab0-4032-9da9-a937bbb9fba9')
     def test_list_container_contents_with_format_json(self):
-        # get container contents list using format_json param
+        """Test getting container contents list using format_json param"""
         container_name = self.create_container()
         self.create_object(container_name)
 
@@ -190,7 +195,7 @@
 
     @decorators.idempotent_id('655a53ca-4d15-408c-a377-f4c6dbd0a1fa')
     def test_list_container_contents_with_format_xml(self):
-        # get container contents list using format_xml param
+        """Test getting container contents list using format_xml param"""
         container_name = self.create_container()
         self.create_object(container_name)
 
@@ -214,7 +219,7 @@
 
     @decorators.idempotent_id('297ec38b-2b61-4ff4-bcd1-7fa055e97b61')
     def test_list_container_contents_with_limit(self):
-        # get container contents list using limit param
+        """Test getting container contents list using limit param"""
         container_name = self.create_container()
         object_name, _ = self.create_object(container_name)
 
@@ -227,7 +232,7 @@
 
     @decorators.idempotent_id('c31ddc63-2a58-4f6b-b25c-94d2937e6867')
     def test_list_container_contents_with_marker(self):
-        # get container contents list using marker param
+        """Test getting container contents list using marker param"""
         container_name = self.create_container()
         object_name, _ = self.create_object(container_name)
 
@@ -240,7 +245,7 @@
 
     @decorators.idempotent_id('58ca6cc9-6af0-408d-aaec-2a6a7b2f0df9')
     def test_list_container_contents_with_path(self):
-        # get container contents list using path param
+        """Test getting container contents list using path param"""
         container_name = self.create_container()
         object_name = data_utils.rand_name(name='TestObject')
         object_name = 'Swift/' + object_name
@@ -255,7 +260,7 @@
 
     @decorators.idempotent_id('77e742c7-caf2-4ec9-8aa4-f7d509a3344c')
     def test_list_container_contents_with_prefix(self):
-        # get container contents list using prefix param
+        """Test getting container contents list using prefix param"""
         container_name = self.create_container()
         object_name, _ = self.create_object(container_name)
 
@@ -270,7 +275,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('96e68f0e-19ec-4aa2-86f3-adc6a45e14dd')
     def test_list_container_metadata(self):
-        # List container metadata
+        """Test listing container metadata"""
         container_name = self.create_container()
 
         metadata = {'name': 'Pictures'}
@@ -286,7 +291,7 @@
 
     @decorators.idempotent_id('a2faf936-6b13-4f8d-92a2-c2278355821e')
     def test_list_no_container_metadata(self):
-        # HEAD container without metadata
+        """Test listing container without metadata"""
         container_name = self.create_container()
 
         resp, _ = self.container_client.list_container_metadata(
@@ -296,7 +301,10 @@
 
     @decorators.idempotent_id('cf19bc0b-7e16-4a5a-aaed-cb0c2fe8deef')
     def test_update_container_metadata_with_create_and_delete_metadata(self):
-        # Send one request of adding and deleting metadata
+        """Test updating container with adding and deleting metadata
+
+        Send one request that both adds and deletes metadata.
+        """
         container_name = data_utils.rand_name(name='TestContainer')
         metadata_1 = {'X-Container-Meta-test-container-meta1': 'Meta1'}
         self.container_client.update_container(container_name, **metadata_1)
@@ -319,7 +327,7 @@
 
     @decorators.idempotent_id('2ae5f295-4bf1-4e04-bfad-21e54b62cec5')
     def test_update_container_metadata_with_create_metadata(self):
-        # update container metadata using add metadata
+        """Test updating container metadata using add metadata"""
         container_name = self.create_container()
 
         metadata = {'test-container-meta1': 'Meta1'}
@@ -337,7 +345,7 @@
 
     @decorators.idempotent_id('3a5ce7d4-6e4b-47d0-9d87-7cd42c325094')
     def test_update_container_metadata_with_delete_metadata(self):
-        # update container metadata using delete metadata
+        """Test updating container metadata using delete metadata"""
         container_name = data_utils.rand_name(name='TestContainer')
         metadata = {'X-Container-Meta-test-container-meta1': 'Meta1'}
         self.container_client.update_container(container_name, **metadata)
@@ -355,7 +363,7 @@
 
     @decorators.idempotent_id('31f40a5f-6a52-4314-8794-cd89baed3040')
     def test_update_container_metadata_with_create_metadata_key(self):
-        # update container metadata with a blank value of metadata
+        """Test updating container metadata with a blank value of metadata"""
         container_name = self.create_container()
 
         metadata = {'test-container-meta1': ''}
@@ -371,7 +379,7 @@
 
     @decorators.idempotent_id('a2e36378-6f1f-43f4-840a-ffd9cfd61914')
     def test_update_container_metadata_with_delete_metadata_key(self):
-        # update container metadata with a blank value of metadata
+        """Test updating container metadata with a blank value of metadata"""
         container_name = data_utils.rand_name(name='TestContainer')
         headers = {'X-Container-Meta-test-container-meta1': 'Meta1'}
         self.container_client.update_container(container_name, **headers)
diff --git a/tempest/api/object_storage/test_container_services_negative.py b/tempest/api/object_storage/test_container_services_negative.py
index b8c83b7..51c711f 100644
--- a/tempest/api/object_storage/test_container_services_negative.py
+++ b/tempest/api/object_storage/test_container_services_negative.py
@@ -25,6 +25,7 @@
 
 
 class ContainerNegativeTest(base.BaseObjectTest):
+    """Negative tests of containers"""
 
     @classmethod
     def resource_setup(cls):
@@ -35,13 +36,18 @@
             body = cls.capabilities_client.list_capabilities()
             cls.constraints = body['swift']
 
+    @classmethod
+    def resource_cleanup(cls):
+        cls.delete_containers()
+        super(ContainerNegativeTest, cls).resource_cleanup()
+
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('30686921-4bed-4764-a038-40d741ed4e78')
     @testtools.skipUnless(
         CONF.object_storage_feature_enabled.discoverability,
         'Discoverability function is disabled')
     def test_create_container_name_exceeds_max_length(self):
-        # Attempts to create a container name that is longer than max
+        """Test creating container with name longer than max"""
         max_length = self.constraints['max_container_name_length']
         # create a container with long name
         container_name = data_utils.arbitrary_string(size=max_length + 1)
@@ -58,8 +64,7 @@
         CONF.object_storage_feature_enabled.discoverability,
         'Discoverability function is disabled')
     def test_create_container_metadata_name_exceeds_max_length(self):
-        # Attempts to create container with metadata name
-        # that is longer than max.
+        """Test creating container with metadata name longer than max"""
         max_length = self.constraints['max_meta_name_length']
         container_name = data_utils.rand_name(name='TestContainer')
         metadata_name = 'X-Container-Meta-' + data_utils.arbitrary_string(
@@ -77,8 +82,7 @@
         CONF.object_storage_feature_enabled.discoverability,
         'Discoverability function is disabled')
     def test_create_container_metadata_value_exceeds_max_length(self):
-        # Attempts to create container with metadata value
-        # that is longer than max.
+        """Test creating container with metadata value longer than max"""
         max_length = self.constraints['max_meta_value_length']
         container_name = data_utils.rand_name(name='TestContainer')
         metadata_value = data_utils.arbitrary_string(size=max_length + 1)
@@ -95,8 +99,7 @@
         CONF.object_storage_feature_enabled.discoverability,
         'Discoverability function is disabled')
     def test_create_container_metadata_exceeds_overall_metadata_count(self):
-        # Attempts to create container with metadata that exceeds the
-        # default count
+        """Test creating container with metadata exceeding default count"""
         max_count = self.constraints['max_meta_count']
         container_name = data_utils.rand_name(name='TestContainer')
         metadata = {}
@@ -113,8 +116,7 @@
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('1a95ab2e-b712-4a98-8a4d-8ce21b7557d6')
     def test_get_metadata_headers_with_invalid_container_name(self):
-        # Attempts to retrieve metadata headers with an invalid
-        # container name.
+        """Test getting metadata headers with invalid container name"""
         self.assertRaises(exceptions.NotFound,
                           self.container_client.list_container_metadata,
                           'invalid_container_name')
@@ -122,7 +124,7 @@
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('125a24fa-90a7-4cfc-b604-44e49d788390')
     def test_update_metadata_with_nonexistent_container_name(self):
-        # Attempts to update metadata using a nonexistent container name.
+        """Test updating metadata using a nonexistent container name"""
         metadata = {'animal': 'penguin'}
 
         self.assertRaises(
@@ -133,7 +135,7 @@
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('65387dbf-a0e2-4aac-9ddc-16eb3f1f69ba')
     def test_delete_with_nonexistent_container_name(self):
-        # Attempts to delete metadata using a nonexistent container name.
+        """Test deleting metadata using a non existent container name"""
         metadata = {'animal': 'penguin'}
 
         self.assertRaises(
@@ -144,8 +146,7 @@
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('14331d21-1e81-420a-beea-19cb5e5207f5')
     def test_list_all_container_objects_with_nonexistent_container(self):
-        # Attempts to get a listing of all objects on a container
-        # that doesn't exist.
+        """Test getting a list of all objects on a non existent container"""
         params = {'limit': 9999, 'format': 'json'}
         self.assertRaises(exceptions.NotFound,
                           self.container_client.list_container_objects,
@@ -154,8 +155,7 @@
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('86b2ab08-92d5-493d-acd2-85f0c848819e')
     def test_list_all_container_objects_on_deleted_container(self):
-        # Attempts to get a listing of all objects on a container
-        # that was deleted.
+        """Test getting a list of all objects on a deleted container"""
         container_name = self.create_container()
         # delete container
         resp, _ = self.container_client.delete_container(container_name)
@@ -168,14 +168,11 @@
     @decorators.attr(type=["negative"])
     @decorators.idempotent_id('42da116e-1e8c-4c96-9e06-2f13884ed2b1')
     def test_delete_non_empty_container(self):
+        """Test deleting a container with object in it"""
         # create a container and an object within it
         # attempt to delete a container that isn't empty.
         container_name = self.create_container()
-        self.addCleanup(self.container_client.delete_container,
-                        container_name)
         object_name, _ = self.create_object(container_name)
-        self.addCleanup(self.object_client.delete_object,
-                        container_name, object_name)
 
         ex = self.assertRaises(exceptions.Conflict,
                                self.container_client.delete_container,
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 1243b83..ef98ed8 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -21,6 +21,7 @@
 
 
 class StaticWebTest(base.BaseObjectTest):
+    """Test static web"""
 
     @classmethod
     def resource_setup(cls):
@@ -47,6 +48,7 @@
     @decorators.idempotent_id('c1f055ab-621d-4a6a-831f-846fcb578b8b')
     @utils.requires_ext(extension='staticweb', service='object')
     def test_web_index(self):
+        """Test web index"""
         headers = {'web-index': self.object_name}
 
         self.container_client.create_update_or_delete_container_metadata(
@@ -79,6 +81,7 @@
     @decorators.idempotent_id('941814cf-db9e-4b21-8112-2b6d0af10ee5')
     @utils.requires_ext(extension='staticweb', service='object')
     def test_web_listing(self):
+        """Test web listing"""
         headers = {'web-listings': 'true'}
 
         self.container_client.create_update_or_delete_container_metadata(
@@ -111,6 +114,7 @@
     @decorators.idempotent_id('bc37ec94-43c8-4990-842e-0e5e02fc8926')
     @utils.requires_ext(extension='staticweb', service='object')
     def test_web_listing_css(self):
+        """Test web listing css"""
         headers = {'web-listings': 'true',
                    'web-listings-css': 'listings.css'}
 
@@ -134,6 +138,7 @@
     @decorators.idempotent_id('f18b4bef-212e-45e7-b3ca-59af3a465f82')
     @utils.requires_ext(extension='staticweb', service='object')
     def test_web_error(self):
+        """Test web error"""
         headers = {'web-listings': 'true',
                    'web-error': self.object_name}
 
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 322579c..6b1f849 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 import time
+from urllib import parse as urlparse
 
-from six.moves.urllib import parse as urlparse
 import testtools
 
 from tempest.api.object_storage import base
@@ -33,6 +33,8 @@
 
 
 class ContainerSyncTest(base.BaseObjectTest):
+    """Test container synchronization"""
+
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
 
@@ -56,6 +58,7 @@
 
         # Default container-server config only allows localhost
         cls.local_ip = '127.0.0.1'
+        cls.local_ip_v6 = '[::1]'
 
         # Must be configure according to container-sync interval
         container_sync_timeout = CONF.object_storage.container_sync_timeout
@@ -90,7 +93,7 @@
             # create object in container
             object_name = data_utils.rand_name(name='TestSyncObject')
             data = object_name[::-1].encode()  # Raw data, we need bytes
-            resp, _ = obj_client[0].create_object(cont[0], object_name, data)
+            obj_client[0].create_object(cont[0], object_name, data)
             self.objects.append(object_name)
 
         # wait until container contents list is not empty
@@ -123,19 +126,27 @@
                 self.assertEqual(object_content, obj_name[::-1].encode())
 
     @decorators.attr(type='slow')
-    @decorators.skip_because(bug='1317133')
+    @decorators.unstable_test(bug='1317133')
     @decorators.idempotent_id('be008325-1bba-4925-b7dd-93b58f22ce9b')
     @testtools.skipIf(
         not CONF.object_storage_feature_enabled.container_sync,
         'Old-style container sync function is disabled')
     def test_container_synchronization(self):
+        """Test container synchronization"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
-            client_proxy_ip = \
-                urlparse.urlparse(cont_client.base_url).netloc.split(':')[0]
-            client_base_url = \
-                cont_client.base_url.replace(client_proxy_ip,
-                                             self.local_ip)
+            # use rsplit with a maxsplit of 1 to ensure IPv6 addresses are
+            # handled properly as well
+            client_proxy_ip = urlparse.urlparse(
+                cont_client.base_url).netloc.rsplit(':', 1)[0]
+            if client_proxy_ip.startswith("["):  # lazy check
+                client_base_url = \
+                    cont_client.base_url.replace(client_proxy_ip,
+                                                 self.local_ip_v6)
+            else:
+                client_base_url = \
+                    cont_client.base_url.replace(client_proxy_ip,
+                                                 self.local_ip)
             headers = {'X-Container-Sync-Key': 'sync_key',
                        'X-Container-Sync-To': "%s/%s" %
                        (client_base_url, str(cont))}
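rsplit(':', 1) is used above because an IPv6 netloc such as '[2001:db8::1]:8080' contains colons inside the host itself, so netloc.split(':')[0] would truncate the address. A standalone check of the parsing logic (the example endpoints are made up):

    from urllib import parse as urlparse

    # Example URLs are illustrative; the logic mirrors make_headers() above.
    for base_url in ('http://203.0.113.10:8080/v1/AUTH_test',
                     'http://[2001:db8::1]:8080/v1/AUTH_test'):
        netloc = urlparse.urlparse(base_url).netloc
        # maxsplit=1 strips only the trailing ':port', keeping the colons
        # that belong to a bracketed IPv6 address.
        host = netloc.rsplit(':', 1)[0]
        local = '[::1]' if host.startswith('[') else '127.0.0.1'
        print(base_url.replace(host, local))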
diff --git a/tempest/api/object_storage/test_container_sync_middleware.py b/tempest/api/object_storage/test_container_sync_middleware.py
index e77b079..db6cfa4 100644
--- a/tempest/api/object_storage/test_container_sync_middleware.py
+++ b/tempest/api/object_storage/test_container_sync_middleware.py
@@ -27,6 +27,7 @@
 
 
 class ContainerSyncMiddlewareTest(test_container_sync.ContainerSyncTest):
+    """Test containers synchronization specifying realm and cluster"""
 
     @classmethod
     def resource_setup(cls):
@@ -41,6 +42,7 @@
     @decorators.idempotent_id('ea4645a1-d147-4976-82f7-e5a7a3065f80')
     @utils.requires_ext(extension='container_sync', service='object')
     def test_container_synchronization(self):
+        """Test container synchronization specifying realm and cluster"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
             account_name = cont_client.base_url.split('/')[-1]
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index 1567e06..365dc78 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -19,6 +19,7 @@
 
 
 class CrossdomainTest(base.BaseObjectTest):
+    """Test crossdomain policy"""
 
     @classmethod
     def resource_setup(cls):
@@ -31,12 +32,10 @@
 
         cls.xml_end = "</cross-domain-policy>"
 
-    def setUp(self):
-        super(CrossdomainTest, self).setUp()
-
     @decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
     @utils.requires_ext(extension='crossdomain', service='object')
     def test_get_crossdomain_policy(self):
+        """Test getting crossdomain policy"""
         url = self.account_client._get_base_version_url() + "crossdomain.xml"
         resp, body = self.account_client.raw_request(url, "GET")
         self.account_client._error_checker(resp, body)
diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
index 8e9e406..d4a6a9f2 100644
--- a/tempest/api/object_storage/test_healthcheck.py
+++ b/tempest/api/object_storage/test_healthcheck.py
@@ -19,12 +19,11 @@
 
 
 class HealthcheckTest(base.BaseObjectTest):
-
-    def setUp(self):
-        super(HealthcheckTest, self).setUp()
+    """Test healthcheck"""
 
     @decorators.idempotent_id('db5723b1-f25c-49a9-bfeb-7b5640caf337')
     def test_get_healthcheck(self):
+        """Test getting healthcheck"""
         url = self.account_client._get_base_version_url() + "healthcheck"
         resp, body = self.account_client.raw_request(url, "GET")
         self.account_client._error_checker(resp, body)
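The healthcheck test above issues a raw GET against the '/healthcheck' path served by Swift's healthcheck middleware and lets _error_checker() validate the response. A standalone sketch with plain urllib (the proxy URL is made up, and the 200/'OK' reply is an assumption about the middleware's conventional behaviour, not something shown in this diff):

    from urllib import request

    # Illustrative only: requires a reachable Swift proxy at this made-up URL.
    url = 'http://203.0.113.10:8080/healthcheck'
    with request.urlopen(url) as resp:
        print(resp.status, resp.read())  # conventionally 200 and b'OK'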
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index 86f7c8c..6f6e32f 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -21,6 +21,8 @@
 
 
 class ObjectExpiryTest(base.BaseObjectTest):
+    """Test object expiry"""
+
     @classmethod
     def resource_setup(cls):
         super(ObjectExpiryTest, cls).resource_setup()
@@ -83,6 +85,7 @@
 
     @decorators.idempotent_id('fb024a42-37f3-4ba5-9684-4f40a7910b41')
     def test_get_object_after_expiry_time(self):
+        """Test object is expired after x-delete-after time"""
         # the 10s is important, because the get calls can take 3s each
         # some times
         metadata = {'X-Delete-After': '10'}
@@ -90,5 +93,6 @@
 
     @decorators.idempotent_id('e592f18d-679c-48fe-9e36-4be5f47102c5')
     def test_get_object_at_expiry_time(self):
+        """Test object is expired at x-delete-at time"""
         metadata = {'X-Delete-At': str(int(time.time()) + 10)}
         self._test_object_expiry(metadata)
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index cd834bf..39e895e 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -15,8 +15,7 @@
 import hashlib
 import hmac
 import time
-
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from tempest.api.object_storage import base
 from tempest.common import utils
@@ -25,6 +24,7 @@
 
 
 class ObjectFormPostTest(base.BaseObjectTest):
+    """Test object post with form"""
 
     metadata = {}
     containers = []
@@ -110,6 +110,7 @@
     @decorators.idempotent_id('80fac02b-6e54-4f7b-be0d-a965b5cbef76')
     @utils.requires_ext(extension='formpost', service='object')
     def test_post_object_using_form(self):
+        """Test posting object using form"""
         body, content_type = self.get_multipart_form()
 
         headers = {'Content-Type': content_type,
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index df6a0fd..971a223 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -15,8 +15,7 @@
 import hashlib
 import hmac
 import time
-
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from tempest.api.object_storage import base
 from tempest.common import utils
@@ -26,6 +25,7 @@
 
 
 class ObjectFormPostNegativeTest(base.BaseObjectTest):
+    """Negative tests of object post with form"""
 
     metadata = {}
     containers = []
@@ -112,6 +112,7 @@
     @utils.requires_ext(extension='formpost', service='object')
     @decorators.attr(type=['negative'])
     def test_post_object_using_form_expired(self):
+        """Test posting object using expired form"""
         body, content_type = self.get_multipart_form(expires=1)
         time.sleep(2)
 
@@ -129,6 +130,7 @@
     @utils.requires_ext(extension='formpost', service='object')
     @decorators.attr(type=['negative'])
     def test_post_object_using_form_invalid_signature(self):
+        """Test posting object using form with invalid signature"""
         self.key = "Wrong"
         body, content_type = self.get_multipart_form()
 
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index acb578d..2823185 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -13,12 +13,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import hashlib
 import random
 import re
 import time
 import zlib
 
+from oslo_utils.secretutils import md5
 from tempest.api.object_storage import base
 from tempest.common import custom_matchers
 from tempest import config
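hashlib.md5 is replaced here by oslo_utils.secretutils.md5 so the Etag checksums can be computed with usedforsecurity=False, which is typically done to keep the tests working on FIPS-restricted interpreters. A minimal sketch of the call shape, mirroring the Etag hunks later in this file (the payload is illustrative):

    # Illustrative only: same call pattern as the Etag computations below.
    from oslo_utils.secretutils import md5

    data = b'some object payload'
    etag = md5(data, usedforsecurity=False).hexdigest()
    metadata = {'Etag': etag}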
@@ -29,6 +29,7 @@
 
 
 class ObjectTest(base.BaseObjectTest):
+    """Test storage object"""
 
     @classmethod
     def resource_setup(cls):
@@ -78,6 +79,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('5b4ce26f-3545-46c9-a2ba-5754358a4c62')
     def test_create_object(self):
+        """Test creating object and checking the object's uploaded content"""
         # create object
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
@@ -97,7 +99,7 @@
 
     @decorators.idempotent_id('5daebb1d-f0d5-4dc9-b541-69672eff00b0')
     def test_create_object_with_content_disposition(self):
-        # create object with content_disposition
+        """Test creating object with content-disposition"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {}
@@ -119,7 +121,7 @@
 
     @decorators.idempotent_id('605f8317-f945-4bee-ae91-013f1da8f0a0')
     def test_create_object_with_content_encoding(self):
-        # create object with content_encoding
+        """Test creating object with content-encoding"""
         object_name = data_utils.rand_name(name='TestObject')
 
         # put compressed string
@@ -146,11 +148,11 @@
 
     @decorators.idempotent_id('73820093-0503-40b1-a478-edf0e69c7d1f')
     def test_create_object_with_etag(self):
-        # create object with etag
+        """Test creating object with Etag"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
-        md5 = hashlib.md5(data).hexdigest()
-        metadata = {'Etag': md5}
+        create_md5 = md5(data, usedforsecurity=False).hexdigest()
+        metadata = {'Etag': create_md5}
         resp, _ = self.object_client.create_object(
             self.container_name,
             object_name,
@@ -165,8 +167,7 @@
 
     @decorators.idempotent_id('84dafe57-9666-4f6d-84c8-0814d37923b8')
     def test_create_object_with_expect_continue(self):
-        # create object with expect_continue
-
+        """Test creating object with expect_continue"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
 
@@ -181,8 +182,9 @@
         self.assertEqual(data, body)
 
     @decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
+    @decorators.unstable_test(bug='1905432')
     def test_create_object_with_transfer_encoding(self):
-        # create object with transfer_encoding
+        """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(1024)
         headers = {'Transfer-Encoding': 'chunked'}
@@ -202,7 +204,10 @@
 
     @decorators.idempotent_id('0f3d62a6-47e3-4554-b0e5-1a5dc372d501')
     def test_create_object_with_x_fresh_metadata(self):
-        # create object with x_fresh_metadata
+        """Test creating object with x-fresh-metadata
+
+        The previously added metadata will be cleared.
+        """
         object_name_base = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata_1 = {'X-Object-Meta-test-meta': 'Meta'}
@@ -228,7 +233,7 @@
 
     @decorators.idempotent_id('1c7ed3e4-2099-406b-b843-5301d4811baf')
     def test_create_object_with_x_object_meta(self):
-        # create object with object_meta
+        """Test creating object with x-object-meta"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -247,7 +252,7 @@
 
     @decorators.idempotent_id('e4183917-33db-4153-85cc-4dacbb938865')
     def test_create_object_with_x_object_metakey(self):
-        # create object with the blank value of metadata
+        """Test creating object with the blank value of metadata"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {'X-Object-Meta-test-meta': ''}
@@ -266,7 +271,10 @@
 
     @decorators.idempotent_id('ce798afc-b278-45de-a5ce-2ea124b98b99')
     def test_create_object_with_x_remove_object_meta(self):
-        # create object with x_remove_object_meta
+        """Test creating object with x-remove-object-meta
+
+        The metadata will be removed from the object.
+        """
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
@@ -289,7 +297,11 @@
 
     @decorators.idempotent_id('ad21e342-7916-4f9e-ab62-a1f885f2aaf9')
     def test_create_object_with_x_remove_object_metakey(self):
-        # create object with the blank value of remove metadata
+        """Test creating object with the blank value of remove metadata
+
+        Creating the object with blank 'X-Remove-Object-Meta-test-meta'
+        metadata removes 'x-object-meta-test-meta' from the object.
+        """
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
@@ -312,7 +324,7 @@
 
     @decorators.idempotent_id('17738d45-03bd-4d45-9e0b-7b2f58f98687')
     def test_delete_object(self):
-        # create object
+        """Test deleting object"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         resp, _ = self.object_client.create_object(self.container_name,
@@ -325,7 +337,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('7a94c25d-66e6-434c-9c38-97d4e2c29945')
     def test_update_object_metadata(self):
-        # update object metadata
+        """Test updating object metadata"""
         object_name, _ = self.create_object(self.container_name)
 
         metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -343,7 +355,7 @@
 
     @decorators.idempotent_id('48650ed0-c189-4e1e-ad6b-1d4770c6e134')
     def test_update_object_metadata_with_remove_metadata(self):
-        # update object metadata with remove metadata
+        """Test updating object metadata with remove metadata"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
@@ -366,6 +378,11 @@
 
     @decorators.idempotent_id('f726174b-2ded-4708-bff7-729d12ce1f84')
     def test_update_object_metadata_with_create_and_remove_metadata(self):
+        """Test updating object with creation and deletion of metadata
+
+        Update the object with creation and deletion of metadata in one
+        request; both operations will succeed.
+        """
         # creation and deletion of metadata with one request
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
@@ -392,8 +409,7 @@
 
     @decorators.idempotent_id('08854588-6449-4bb7-8cca-f2e1040f5e6f')
     def test_update_object_metadata_with_x_object_manifest(self):
-        # update object metadata with x_object_manifest
-
+        """Test updating object metadata with x_object_manifest"""
         # uploading segments
         object_name, _ = self._upload_segments()
         # creating a manifest file
@@ -418,7 +434,7 @@
 
     @decorators.idempotent_id('0dbbe89c-6811-4d84-a2df-eca2bdd40c0e')
     def test_update_object_metadata_with_x_object_metakey(self):
-        # update object metadata with a blank value of metadata
+        """Test updating object metadata with a blank value of metadata"""
         object_name, _ = self.create_object(self.container_name)
 
         update_metadata = {'X-Object-Meta-test-meta': ''}
@@ -436,7 +452,7 @@
 
     @decorators.idempotent_id('9a88dca4-b684-425b-806f-306cd0e57e42')
     def test_update_object_metadata_with_x_remove_object_metakey(self):
-        # update object metadata with a blank value of remove metadata
+        """Test updating object metadata with blank remove metadata value"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.arbitrary_string()
         create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -460,7 +476,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('9a447cf6-de06-48de-8226-a8c6ed31caf2')
     def test_list_object_metadata(self):
-        # get object metadata
+        """Test listing object metadata"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -478,7 +494,7 @@
 
     @decorators.idempotent_id('170fb90e-f5c3-4b1f-ae1b-a18810821172')
     def test_list_no_object_metadata(self):
-        # get empty list of object metadata
+        """Test listing object metadata for object without metadata"""
         object_name, _ = self.create_object(self.container_name)
 
         resp, _ = self.object_client.list_object_metadata(
@@ -489,8 +505,7 @@
 
     @decorators.idempotent_id('23a3674c-d6de-46c3-86af-ff92bfc8a3da')
     def test_list_object_metadata_with_x_object_manifest(self):
-        # get object metadata with x_object_manifest
-
+        """Test getting object metadata with x_object_manifest"""
         # uploading segments
         object_name, _ = self._upload_segments()
         # creating a manifest file
@@ -530,7 +545,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('02610ba7-86b7-4272-9ed8-aa8d417cb3cd')
     def test_get_object(self):
-        # retrieve object's data (in response body)
+        """Test retrieving object's data (in response body)"""
 
         # create object
         object_name, data = self.create_object(self.container_name)
@@ -543,7 +558,7 @@
 
     @decorators.idempotent_id('005f9bf6-e06d-41ec-968e-96c78e0b1d82')
     def test_get_object_with_metadata(self):
-        # get object with metadata
+        """Test getting object with metadata"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -562,7 +577,7 @@
 
     @decorators.idempotent_id('05a1890e-7db9-4a6c-90a8-ce998a2bddfa')
     def test_get_object_with_range(self):
-        # get object with range
+        """Test getting object with range"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(100)
         self.object_client.create_object(self.container_name,
@@ -580,7 +595,7 @@
 
     @decorators.idempotent_id('11b4515b-7ba7-4ca8-8838-357ded86fc10')
     def test_get_object_with_x_object_manifest(self):
-        # get object with x_object_manifest
+        """Test getting object with x_object_manifest"""
 
         # uploading segments
         object_name, data_segments = self._upload_segments()
@@ -623,10 +638,10 @@
 
     @decorators.idempotent_id('c05b4013-e4de-47af-be84-e598062b16fc')
     def test_get_object_with_if_match(self):
-        # get object with if_match
+        """Test getting object with if_match"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(10)
-        create_md5 = hashlib.md5(data).hexdigest()
+        create_md5 = md5(data, usedforsecurity=False).hexdigest()
         create_metadata = {'Etag': create_md5}
         self.object_client.create_object(self.container_name,
                                          object_name,
@@ -643,7 +658,7 @@
 
     @decorators.idempotent_id('be133639-e5d2-4313-9b1f-2d59fc054a16')
     def test_get_object_with_if_modified_since(self):
-        # get object with if_modified_since
+        """Test getting object with if_modified_since"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         time_now = time.time()
@@ -663,10 +678,10 @@
 
     @decorators.idempotent_id('641500d5-1612-4042-a04d-01fc4528bc30')
     def test_get_object_with_if_none_match(self):
-        # get object with if_none_match
+        """Test getting object with if_none_match"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
-        create_md5 = hashlib.md5(data).hexdigest()
+        create_md5 = md5(data, usedforsecurity=False).hexdigest()
         create_metadata = {'Etag': create_md5}
         self.object_client.create_object(self.container_name,
                                          object_name,
@@ -674,7 +689,7 @@
                                          metadata=create_metadata)
 
         list_data = data_utils.random_bytes()
-        list_md5 = hashlib.md5(list_data).hexdigest()
+        list_md5 = md5(list_data, usedforsecurity=False).hexdigest()
         list_metadata = {'If-None-Match': list_md5}
         resp, body = self.object_client.get_object(
             self.container_name,
@@ -685,7 +700,7 @@
 
     @decorators.idempotent_id('0aa1201c-10aa-467a-bee7-63cbdd463152')
     def test_get_object_with_if_unmodified_since(self):
-        # get object with if_unmodified_since
+        """Test getting object with if_unmodified_since"""
         object_name, data = self.create_object(self.container_name)
 
         time_now = time.time()
@@ -700,7 +715,7 @@
 
     @decorators.idempotent_id('94587078-475f-48f9-a40f-389c246e31cd')
     def test_get_object_with_x_newest(self):
-        # get object with x_newest
+        """Test getting object with x_newest"""
         object_name, data = self.create_object(self.container_name)
 
         list_metadata = {'X-Newest': 'true'}
@@ -713,6 +728,7 @@
 
     @decorators.idempotent_id('1a9ab572-1b66-4981-8c21-416e2a5e6011')
     def test_copy_object_in_same_container(self):
+        """Test copying object to another object in same container"""
         # create source object
         src_object_name = data_utils.rand_name(name='SrcObject')
         src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
@@ -742,7 +758,7 @@
 
     @decorators.idempotent_id('2248abba-415d-410b-9c30-22dff9cd6e67')
     def test_copy_object_to_itself(self):
-        # change the content type of an existing object
+        """Test changing the content type of an existing object"""
 
         # create object
         object_name, _ = self.create_object(self.container_name)
@@ -755,11 +771,11 @@
         headers = {}
         headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
                                             str(object_name))
-        resp, body = self.object_client.create_object(self.container_name,
-                                                      object_name,
-                                                      data=None,
-                                                      metadata=metadata,
-                                                      headers=headers)
+        resp, _ = self.object_client.create_object(self.container_name,
+                                                   object_name,
+                                                   data=None,
+                                                   metadata=metadata,
+                                                   headers=headers)
         self.assertHeaders(resp, 'Object', 'PUT')
 
         # check the content type
@@ -769,6 +785,7 @@
 
     @decorators.idempotent_id('06f90388-2d0e-40aa-934c-e9a8833e958a')
     def test_copy_object_2d_way(self):
+        """Test copying object's data to the new object using COPY"""
         # create source object
         src_object_name = data_utils.rand_name(name='SrcObject')
         src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
@@ -793,6 +810,7 @@
 
     @decorators.idempotent_id('aa467252-44f3-472a-b5ae-5b57c3c9c147')
     def test_copy_object_across_containers(self):
+        """Test copying object to another container"""
         # create a container to use as a source container
         src_container_name = data_utils.rand_name(name='TestSourceContainer')
         self.container_client.update_container(src_container_name)
@@ -837,6 +855,7 @@
 
     @decorators.idempotent_id('5a9e2cc6-85b6-46fc-916d-0cbb7a88e5fd')
     def test_copy_object_with_x_fresh_metadata(self):
+        """Test copying objectwith x_fresh_metadata"""
         # create source object
         metadata = {'x-object-meta-src': 'src_value'}
         src_object_name, data = self.create_object(self.container_name,
@@ -858,6 +877,7 @@
 
     @decorators.idempotent_id('a28a8b99-e701-4d7e-9d84-3b66f121460b')
     def test_copy_object_with_x_object_metakey(self):
+        """Test copying object with x_object_metakey"""
         # create source object
         metadata = {'x-object-meta-src': 'src_value'}
         src_obj_name, data = self.create_object(self.container_name,
@@ -881,6 +901,7 @@
 
     @decorators.idempotent_id('edabedca-24c3-4322-9b70-d6d9f942a074')
     def test_copy_object_with_x_object_meta(self):
+        """Test copying object with x_object_meta"""
         # create source object
         metadata = {'x-object-meta-src': 'src_value'}
         src_obj_name, data = self.create_object(self.container_name,
@@ -904,6 +925,7 @@
 
     @decorators.idempotent_id('e3e6a64a-9f50-4955-b987-6ce6767c97fb')
     def test_object_upload_in_segments(self):
+        """Test uploading object in segments"""
         # create object
         object_name = data_utils.rand_name(name='LObject')
         data = data_utils.arbitrary_string()
@@ -947,14 +969,17 @@
 
     @decorators.idempotent_id('50d01f12-526f-4360-9ac2-75dd508d7b68')
     def test_get_object_if_different(self):
-        # http://en.wikipedia.org/wiki/HTTP_ETag
-        # Make a conditional request for an object using the If-None-Match
-        # header, it should get downloaded only if the local file is different,
-        # otherwise the response code should be 304 Not Modified
+        """Test getting object content only when the local file is different
+
+        http://en.wikipedia.org/wiki/HTTP_ETag
+        Make a conditional request for an object using the If-None-Match
+        header; it should get downloaded only if the local file is different,
+        otherwise the response code should be 304 Not Modified.
+        """
         object_name, data = self.create_object(self.container_name)
         # local copy is identical, no download
-        md5 = hashlib.md5(data).hexdigest()
-        headers = {'If-None-Match': md5}
+        object_md5 = md5(data, usedforsecurity=False).hexdigest()
+        headers = {'If-None-Match': object_md5}
         url = "%s/%s" % (self.container_name, object_name)
         resp, _ = self.object_client.get(url, headers=headers)
         self.assertEqual(resp['status'], '304')
@@ -968,13 +993,14 @@
 
         # local copy is different, download
         local_data = "something different"
-        md5 = hashlib.md5(local_data.encode()).hexdigest()
-        headers = {'If-None-Match': md5}
+        other_md5 = md5(local_data.encode(), usedforsecurity=False).hexdigest()
+        headers = {'If-None-Match': other_md5}
         resp, _ = self.object_client.get(url, headers=headers)
         self.assertHeaders(resp, 'Object', 'GET')
 
 
 class PublicObjectTest(base.BaseObjectTest):
+    """Test public storage object"""
 
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
@@ -1000,9 +1026,11 @@
 
     @decorators.idempotent_id('07c9cf95-c0d4-4b49-b9c8-0ef2c9b27193')
     def test_access_public_container_object_without_using_creds(self):
-        # make container public-readable and access an object in it object
-        # anonymously, without using credentials
+        """Test accessing public container object without using credentials
 
+        Make container public-readable and access an object in it
+        anonymously, without using credentials.
+        """
         # update container metadata to make it publicly readable
         cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
         resp_meta, body = (
@@ -1040,8 +1068,11 @@
 
     @decorators.idempotent_id('54e2a2fe-42dc-491b-8270-8e4217dd4cdc')
     def test_access_public_object_with_another_user_creds(self):
-        # make container public-readable and access an object in it using
-        # another user's credentials
+        """Test accessing public object with another user's credentials
+
+        Make container public-readable and access an object in it using
+        another user's credentials.
+        """
         cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
         resp_meta, body = (
             self.container_client.create_update_or_delete_container_metadata(
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index c66776e..22d12ce 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -12,15 +12,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import hashlib
-
 from oslo_serialization import jsonutils as json
 
+from oslo_utils.secretutils import md5
 from tempest.api.object_storage import base
-from tempest.common import custom_matchers
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
 # Each segment, except for the final one, must be at least 1 megabyte
@@ -28,6 +25,7 @@
 
 
 class ObjectSloTest(base.BaseObjectTest):
+    """Test static large object"""
 
     def setUp(self):
         super(ObjectSloTest, self).setUp()
@@ -35,11 +33,7 @@
         self.objects = []
 
     def tearDown(self):
-        for obj in self.objects:
-            test_utils.call_and_ignore_notfound_exc(
-                self.object_client.delete_object,
-                self.container_name, obj)
-        self.container_client.delete_container(self.container_name)
+        self.delete_containers()
         super(ObjectSloTest, self).tearDown()
 
     def _create_object(self, container_name, object_name, data, params=None):
@@ -70,10 +64,12 @@
         path_object_2 = '/%s/%s' % (self.container_name,
                                     object_name_base_2)
         data_manifest = [{'path': path_object_1,
-                          'etag': hashlib.md5(self.content).hexdigest(),
+                          'etag': md5(self.content,
+                                      usedforsecurity=False).hexdigest(),
                           'size_bytes': data_size},
                          {'path': path_object_2,
-                          'etag': hashlib.md5(self.content).hexdigest(),
+                          'etag': md5(self.content,
+                                      usedforsecurity=False).hexdigest(),
                           'size_bytes': data_size}]
 
         return json.dumps(data_manifest)
@@ -109,7 +105,7 @@
     @decorators.idempotent_id('2c3f24a6-36e8-4711-9aa2-800ee1fc7b5b')
     @utils.requires_ext(extension='slo', service='object')
     def test_upload_manifest(self):
-        # create static large object from multipart manifest
+        """Test creating static large object from multipart manifest"""
         manifest = self._create_manifest()
 
         params = {'multipart-manifest': 'put'}
@@ -124,7 +120,10 @@
     @decorators.idempotent_id('e69ad766-e1aa-44a2-bdd2-bf62c09c1456')
     @utils.requires_ext(extension='slo', service='object')
     def test_list_large_object_metadata(self):
-        # list static large object metadata using multipart manifest
+        """Test listing static large object metadata
+
+        List static large object metadata using multipart manifest
+        """
         object_name = self._create_large_object()
 
         resp, _ = self.object_client.list_object_metadata(
@@ -136,7 +135,7 @@
     @decorators.idempotent_id('49bc49bc-dd1b-4c0f-904e-d9f10b830ee8')
     @utils.requires_ext(extension='slo', service='object')
     def test_retrieve_large_object(self):
-        # list static large object using multipart manifest
+        """Test listing static large object using multipart manifest"""
         object_name = self._create_large_object()
 
         resp, body = self.object_client.get_object(
@@ -151,7 +150,7 @@
     @decorators.idempotent_id('87b6dfa1-abe9-404d-8bf0-6c3751e6aa77')
     @utils.requires_ext(extension='slo', service='object')
     def test_delete_large_object(self):
-        # delete static large object using multipart manifest
+        """Test deleting static large object using multipart manifest"""
         object_name = self._create_large_object()
 
         params_del = {'multipart-manifest': 'delete'}
@@ -160,18 +159,8 @@
             object_name,
             params=params_del)
 
-        # When deleting SLO using multipart manifest, the response contains
-        # not 'content-length' but 'transfer-encoding' header. This is the
-        # special case, therefore the existence of response headers is checked
-        # outside of custom matcher.
-        self.assertIn('transfer-encoding', resp)
-        self.assertIn('content-type', resp)
-        self.assertIn('x-trans-id', resp)
-        self.assertIn('date', resp)
+        self.assertHeaders(resp, 'Object', 'DELETE')
 
-        # Check only the format of common headers with custom matcher
-        self.assertThat(resp, custom_matchers.AreAllWellFormatted())
-
-        resp, body = self.container_client.list_container_objects(
+        resp, _ = self.container_client.list_container_objects(
             self.container_name)
         self.assertEqual(int(resp['x-container-object-count']), 0)
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index b99f93a..e75e22a 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -15,8 +15,7 @@
 import hashlib
 import hmac
 import time
-
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from tempest.api.object_storage import base
 from tempest.common import utils
@@ -25,6 +24,7 @@
 
 
 class ObjectTempUrlTest(base.BaseObjectTest):
+    """Test object temp url"""
 
     @classmethod
     def resource_setup(cls):
@@ -90,6 +90,7 @@
     @decorators.idempotent_id('f91c96d4-1230-4bba-8eb9-84476d18d991')
     @utils.requires_ext(extension='tempurl', service='object')
     def test_get_object_using_temp_url(self):
+        """Test getting object using temp url"""
         expires = self._get_expiry_date()
 
         # get a temp URL for the created object
@@ -109,6 +110,7 @@
     @decorators.idempotent_id('671f9583-86bd-4128-a034-be282a68c5d8')
     @utils.requires_ext(extension='tempurl', service='object')
     def test_get_object_using_temp_url_key_2(self):
+        """Test getting object using metadata 'Temp-URL-Key-2'"""
         key2 = 'Meta2-'
         metadata = {'Temp-URL-Key-2': key2}
         self.account_client.create_update_or_delete_account_metadata(
@@ -134,6 +136,7 @@
     @decorators.idempotent_id('9b08dade-3571-4152-8a4f-a4f2a873a735')
     @utils.requires_ext(extension='tempurl', service='object')
     def test_put_object_using_temp_url(self):
+        """Test putting object using temp url"""
         new_data = data_utils.random_bytes(size=len(self.object_name))
 
         expires = self._get_expiry_date()
@@ -160,6 +163,7 @@
     @decorators.idempotent_id('249a0111-5ad3-4534-86a7-1993d55f9185')
     @utils.requires_ext(extension='tempurl', service='object')
     def test_head_object_using_temp_url(self):
+        """Test HEAD operation of object using temp url"""
         expires = self._get_expiry_date()
 
         # get a temp URL for the created object
@@ -174,6 +178,7 @@
     @decorators.idempotent_id('9d9cfd90-708b-465d-802c-e4a8090b823d')
     @utils.requires_ext(extension='tempurl', service='object')
     def test_get_object_using_temp_url_with_inline_query_parameter(self):
+        """Test getting object using temp url with inline query parameter"""
         expires = self._get_expiry_date()
 
         # get a temp URL for the created object
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 17ae6c1..4ad8428 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -15,8 +15,7 @@
 import hashlib
 import hmac
 import time
-
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from tempest.api.object_storage import base
 from tempest.common import utils
@@ -26,6 +25,7 @@
 
 
 class ObjectTempUrlNegativeTest(base.BaseObjectTest):
+    """Negative tests of object temp url"""
 
     metadata = {}
     containers = []
@@ -96,7 +96,7 @@
     @decorators.idempotent_id('5a583aca-c804-41ba-9d9a-e7be132bdf0b')
     @utils.requires_ext(extension='tempurl', service='object')
     def test_get_object_after_expiration_time(self):
-
+        """Test getting object after expiration time"""
         expires = self._get_expiry_date(1)
         # get a temp URL for the created object
         url = self._get_temp_url(self.container_name,
diff --git a/tempest/api/object_storage/test_object_version.py b/tempest/api/object_storage/test_object_version.py
index 75111b6..b64b172 100644
--- a/tempest/api/object_storage/test_object_version.py
+++ b/tempest/api/object_storage/test_object_version.py
@@ -24,6 +24,8 @@
 
 
 class ContainerTest(base.BaseObjectTest):
+    """Test versioned container"""
+
     def assertContainer(self, container, count, byte, versioned):
         resp, _ = self.container_client.list_container_metadata(container)
         self.assertHeaders(resp, 'Container', 'HEAD')
@@ -39,6 +41,15 @@
         not CONF.object_storage_feature_enabled.object_versioning,
         'Object-versioning is disabled')
     def test_versioned_container(self):
+        """Test versioned container
+
+        1. create container1
+        2. create container2, with container1 as 'X-Versions-Location' header
+        3. create object1 in container1
+        4. create 2nd version of object1
+        5. delete object version 2
+        6. delete object version 1
+        """
         # create container
         vers_container_name = data_utils.rand_name(name='TestVersionContainer')
         resp, _ = self.container_client.update_container(vers_container_name)
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
index 1351704..3c76eca 100644
--- a/tempest/api/volume/admin/test_backends_capabilities.py
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -20,6 +20,7 @@
 
 
 class BackendsCapabilitiesAdminTestsJSON(base.BaseVolumeAdminTest):
+    """Test backends capabilities"""
 
     @classmethod
     def resource_setup(cls):
@@ -32,14 +33,16 @@
 
     @decorators.idempotent_id('3750af44-5ea2-4cd4-bc3e-56e7e6caf854')
     def test_get_capabilities_backend(self):
-        # Test backend properties
+        """Test getting backend capabilities"""
         # Check response schema
         self.admin_capabilities_client.show_backend_capabilities(self.hosts[0])
 
     @decorators.idempotent_id('a9035743-d46a-47c5-9cb7-3c80ea16dea0')
     def test_compare_volume_stats_values(self):
-        # Test values comparison between show_backend_capabilities
-        # to show_pools
+        """Test comparing volume stats values
+
+        Compare volume stats between show_backend_capabilities and show_pools.
+        """
         VOLUME_STATS = ('vendor_name',
                         'volume_backend_name',
                         'storage_protocol')
diff --git a/tempest/api/volume/admin/test_encrypted_volumes_extend.py b/tempest/api/volume/admin/test_encrypted_volumes_extend.py
new file mode 100644
index 0000000..7339179
--- /dev/null
+++ b/tempest/api/volume/admin/test_encrypted_volumes_extend.py
@@ -0,0 +1,35 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.volume import base
+from tempest.api.volume import test_volumes_extend as extend
+from tempest.common import utils
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class EncryptedVolumesExtendAttachedTest(extend.BaseVolumesExtendAttachedTest,
+                                         base.BaseVolumeAdminTest):
+    """Tests extending the size of an attached encrypted volume."""
+
+    @decorators.idempotent_id('e93243ec-7c37-4b5b-a099-ebf052c13216')
+    @testtools.skipUnless(
+        CONF.volume_feature_enabled.extend_attached_encrypted_volume,
+        "Attached encrypted volume extend is disabled.")
+    @utils.services('compute')
+    def test_extend_attached_encrypted_volume_luksv1(self):
+        volume = self.create_encrypted_volume(encryption_provider="luks")
+        self._test_extend_attached_volume(volume)
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index f695f51..ddfc78a 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -62,12 +62,25 @@
 
 
 class GroupSnapshotsTest(BaseGroupSnapshotsTest):
-    _api_version = 3
-    min_microversion = '3.14'
-    max_microversion = 'latest'
+    """Test group snapshot"""
+
+    volume_min_microversion = '3.14'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
     def test_group_snapshot_create_show_list_delete(self):
+        """Test create/show/list/delete group snapshot
+
+        1. Create volume type "volume_type1"
+        2. Create group type "group_type1"
+        3. Create group "group1" with "group_type1" and "volume_type1"
+        4. Create volume "volume1" with "volume_type1" and "group1"
+        5. Create group snapshot "group_snapshot1" with "group1"
+        6. Check snapshot created from "volume1" reaches available status
+        7. Check the created group snapshot "group_snapshot1" is in the list
+           of all group snapshots
+        8. Delete group snapshot "group_snapshot1"
+        """
         # Create volume type
         volume_type = self.create_volume_type()
 
@@ -113,10 +126,23 @@
         self._delete_group_snapshot(group_snapshot)
         group_snapshots = self.group_snapshots_client.list_group_snapshots()[
             'group_snapshots']
-        self.assertEmpty(group_snapshots)
+        self.assertNotIn((group_snapshot['name'], group_snapshot['id']),
+                         [(m['name'], m['id']) for m in group_snapshots])
 
     @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
     def test_create_group_from_group_snapshot(self):
+        """Test creating group from group snapshot
+
+        1. Create volume type "volume_type1"
+        2. Create group type "group_type1"
+        3. Create group "group1" with "group_type1" and "volume_type1"
+        4. Create volume "volume1" with "volume_type1" and "group1"
+        5. Create group snapshot "group_snapshot1" with "group1"
+        6. Check snapshot created from "volume1" reaches available status
+        7. Create group "group2" from "group_snapshot1"
+        8. Check the volumes belonging to "group2" reach available status
+        9. Check "group2" reaches available status
+        """
         # Create volume type
         volume_type = self.create_volume_type()
 
@@ -160,6 +186,20 @@
     @decorators.idempotent_id('7d7fc000-0b4c-4376-a372-544116d2e127')
     @decorators.related_bug('1739031')
     def test_delete_group_snapshots_following_updated_volumes(self):
+        """Test deleting group snapshot following updated volumes
+
+        1. Create volume type "volume_type1"
+        2. Create group type "group_type1"
+        3. Create group "group1" with "group_type1" and "volume_type1"
+        4. Create 2 volumes "volume1" and "volume2"
+           with "volume_type1" and "group1"
+        5. Remove each created volume from "group1" and then add it back
+        6. Create group snapshot "group_snapshot1" with "group1"
+        7. Check snapshots created from "volume1" and "volume2" reach
+           available status
+        8. Delete "group_snapshot1"
+        9. Check snapshots created from "volume1" and "volume2" are deleted
+        """
         volume_type = self.create_volume_type()
 
         group_type = self.create_group_type()
@@ -210,13 +250,15 @@
 
 
 class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
-    _api_version = 3
-    min_microversion = '3.19'
-    max_microversion = 'latest'
+    """Test group snapshot with volume microversion greater than 3.18"""
+
+    volume_min_microversion = '3.19'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
     @decorators.skip_because(bug='1770179')
     def test_reset_group_snapshot_status(self):
+        """Test resetting group snapshot status to creating/available/error"""
         # Create volume type
         volume_type = self.create_volume_type()
 
diff --git a/tempest/api/volume/admin/test_group_type_specs.py b/tempest/api/volume/admin/test_group_type_specs.py
index c5e6d1a..63c3546 100644
--- a/tempest/api/volume/admin/test_group_type_specs.py
+++ b/tempest/api/volume/admin/test_group_type_specs.py
@@ -19,12 +19,14 @@
 
 
 class GroupTypeSpecsTest(base.BaseVolumeAdminTest):
-    _api_version = 3
-    min_microversion = '3.11'
-    max_microversion = 'latest'
+    """Test group type specs"""
+
+    volume_min_microversion = '3.11'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('bb4e30d0-de6e-4f4d-866c-dcc48d023b4e')
     def test_group_type_specs_create_show_update_list_delete(self):
+        """Test create/show/update/list/delete group type specs"""
         # Create new group type
         group_type = self.create_group_type()
 
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index 6723207..8154682 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -19,13 +19,14 @@
 
 
 class GroupTypesTest(base.BaseVolumeAdminTest):
-    _api_version = 3
-    min_microversion = '3.11'
-    max_microversion = 'latest'
+    """Test group types"""
+
+    volume_min_microversion = '3.11'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
-    def test_group_type_create_list_update_show(self):
-        # Create/list/show group type.
+    def test_group_type_create_list_update_show_delete(self):
+        """Test create/list/update/show/delete group type"""
         name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
         description = data_utils.rand_name("group-type-description")
         group_specs = {"consistent_group_snapshot_enabled": "<is> False"}
@@ -33,7 +34,8 @@
                   'description': description,
                   'group_specs': group_specs,
                   'is_public': True}
-        body = self.create_group_type(**params)
+        body = self.admin_group_types_client.create_group_type(
+            **params)['group_type']
         self.assertIn('name', body)
         err_msg = ("The created group_type %(var)s is not equal to the "
                    "requested %(var)s")
@@ -63,3 +65,9 @@
             self.assertEqual(params[key], fetched_group_type[key],
                              '%s of the fetched group_type is different '
                              'from the created group_type' % key)
+
+        self.admin_group_types_client.delete_group_type(body['id'])
+        group_list = (
+            self.admin_group_types_client.list_group_types()['group_types'])
+        group_ids = [it['id'] for it in group_list]
+        self.assertNotIn(body['id'], group_ids)
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index 2f6eb6b..f16e4d2 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -23,12 +23,14 @@
 
 
 class GroupsTest(base.BaseVolumeAdminTest):
-    _api_version = 3
-    min_microversion = '3.13'
-    max_microversion = 'latest'
+    """Tests of volume groups with microversion greater than 3.12"""
+
+    volume_min_microversion = '3.13'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('4b111d28-b73d-4908-9bd2-03dc2992e4d4')
     def test_group_create_show_list_delete(self):
+        """Test creating, showing, listing and deleting of volume group"""
         # Create volume type
         volume_type = self.create_volume_type()
 
@@ -95,6 +97,7 @@
 
     @decorators.idempotent_id('4a8a6fd2-8b3b-4641-8f54-6a6f99320006')
     def test_group_update(self):
+        """Test updating volume group"""
         # Create volume type
         volume_type = self.create_volume_type()
 
@@ -150,12 +153,14 @@
 
 
 class GroupsV314Test(base.BaseVolumeAdminTest):
-    _api_version = 3
-    min_microversion = '3.14'
-    max_microversion = 'latest'
+    """Tests of volume groups with microversion greater than 3.13"""
+
+    volume_min_microversion = '3.14'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('2424af8c-7851-4888-986a-794b10c3210e')
     def test_create_group_from_group(self):
+        """Test creating volume group from volume group"""
         # Create volume type
         volume_type = self.create_volume_type()
 
@@ -185,12 +190,14 @@
 
 
 class GroupsV320Test(base.BaseVolumeAdminTest):
-    _api_version = 3
-    min_microversion = '3.20'
-    max_microversion = 'latest'
+    """Tests of volume groups with microversion greater than 3.19"""
+
+    volume_min_microversion = '3.20'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('b20c696b-0cbc-49a5-8b3a-b1fb9338f45c')
     def test_reset_group_status(self):
+        """Test resetting volume group status to creating/available/error"""
         # Create volume type
         volume_type = self.create_volume_type()
 
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index c5c70d2..83733bd 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -10,7 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
 from tempest.api.volume import base
 from tempest.common import waiters
 from tempest import config
@@ -21,6 +20,7 @@
 
 
 class VolumeMultiBackendTest(base.BaseVolumeAdminTest):
+    """Test volume multi backends"""
 
     @classmethod
     def skip_checks(cls):
@@ -78,24 +78,49 @@
 
     @decorators.idempotent_id('c1a41f3f-9dad-493e-9f09-3ff197d477cc')
     def test_backend_name_reporting(self):
-        # get volume id which created by type without prefix
+        """Test backend name reporting for volume when type is without prefix
+
+        1. Create volume type, with 'volume_backend_name' as extra spec key
+        2. Create volume using the created volume type
+        3. Check 'os-vol-host-attr:host' of the volume info, the value should
+           contain '@' character, like 'cinder@CloveStorage#tecs_backend'
+        """
         for volume_id in self.volume_id_list_without_prefix:
             self._test_backend_name_reporting_by_volume_id(volume_id)
 
     @decorators.idempotent_id('f38e647f-ab42-4a31-a2e7-ca86a6485215')
     def test_backend_name_reporting_with_prefix(self):
-        # get volume id which created by type with prefix
+        """Test backend name reporting for volume when type is with prefix
+
+        1. Create volume type, with 'capabilities:volume_backend_name' as
+           extra spec key
+        2. Create volume using the created volume type
+        3. Check 'os-vol-host-attr:host' of the volume info, the value should
+           contain '@' character, like 'cinder@CloveStorage#tecs_backend'
+        """
         for volume_id in self.volume_id_list_with_prefix:
             self._test_backend_name_reporting_by_volume_id(volume_id)
 
     @decorators.idempotent_id('46435ab1-a0af-4401-8373-f14e66b0dd58')
     def test_backend_name_distinction(self):
-        # get volume ids which created by type without prefix
+        """Test volume backend distinction when type is without prefix
+
+        1. For each backend, create volume type with 'volume_backend_name'
+           as extra spec key
+        2. Create volumes using the created volume types
+        3. Check 'os-vol-host-attr:host' of each created volume is different.
+        """
         self._test_backend_name_distinction(self.volume_id_list_without_prefix)
 
     @decorators.idempotent_id('4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed')
     def test_backend_name_distinction_with_prefix(self):
-        # get volume ids which created by type without prefix
+        """Test volume backend distinction when type is with prefix
+
+        1. For each backend, create volume type with
+           'capabilities:volume_backend_name' as extra spec key
+        2. Create volumes using the created volume types
+        3. Check 'os-vol-host-attr:host' of each created volume is different.
+        """
         self._test_backend_name_distinction(self.volume_id_list_with_prefix)
 
     def _get_volume_host(self, volume_id):
@@ -123,4 +148,4 @@
         # assert that volumes are each created on separate hosts:
         msg = ("volumes %s were created in the same backend" % ", "
                .join(volume_hosts))
-        six.assertCountEqual(self, volume_hosts, set(volume_hosts), msg)
+        self.assertCountEqual(volume_hosts, set(volume_hosts), msg)
diff --git a/tempest/api/volume/admin/test_snapshot_manage.py b/tempest/api/volume/admin/test_snapshot_manage.py
index 37a47ec..ab0aa38 100644
--- a/tempest/api/volume/admin/test_snapshot_manage.py
+++ b/tempest/api/volume/admin/test_snapshot_manage.py
@@ -48,6 +48,7 @@
 
     @decorators.idempotent_id('0132f42d-0147-4b45-8501-cc504bbf7810')
     def test_unmanage_manage_snapshot(self):
+        """Test unmanaging and managing volume snapshot"""
         # Create a volume
         volume = self.create_volume()
 
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index 41849bc..4fca240 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -22,6 +22,8 @@
 
 
 class SnapshotsActionsTest(base.BaseVolumeAdminTest):
+    """Test volume snapshot actions"""
+
     @classmethod
     def skip_checks(cls):
         super(SnapshotsActionsTest, cls).skip_checks()
@@ -65,7 +67,7 @@
 
     @decorators.idempotent_id('3e13ca2f-48ea-49f3-ae1a-488e9180d535')
     def test_reset_snapshot_status(self):
-        # Reset snapshot status to creating
+        """Test resetting snapshot status to creating"""
         status = 'creating'
         self.admin_snapshots_client.reset_snapshot_status(
             self.snapshot['id'], status)
@@ -74,6 +76,10 @@
 
     @decorators.idempotent_id('41288afd-d463-485e-8f6e-4eea159413eb')
     def test_update_snapshot_status(self):
+        """Test updating snapshot
+
+        Update snapshot status to 'error' and progress to '80%'.
+        """
         # Reset snapshot status to creating
         status = 'creating'
         self.admin_snapshots_client.reset_snapshot_status(
@@ -95,20 +101,20 @@
 
     @decorators.idempotent_id('05f711b6-e629-4895-8103-7ca069f2073a')
     def test_snapshot_force_delete_when_snapshot_is_creating(self):
-        # test force delete when status of snapshot is creating
+        """Test force delete when status of snapshot is creating"""
         self._create_reset_and_force_delete_temp_snapshot('creating')
 
     @decorators.idempotent_id('92ce8597-b992-43a1-8868-6316b22a969e')
     def test_snapshot_force_delete_when_snapshot_is_deleting(self):
-        # test force delete when status of snapshot is deleting
+        """Test force delete when status of snapshot is deleting"""
         self._create_reset_and_force_delete_temp_snapshot('deleting')
 
     @decorators.idempotent_id('645a4a67-a1eb-4e8e-a547-600abac1525d')
     def test_snapshot_force_delete_when_snapshot_is_error(self):
-        # test force delete when status of snapshot is error
+        """Test force delete when status of snapshot is error"""
         self._create_reset_and_force_delete_temp_snapshot('error')
 
     @decorators.idempotent_id('bf89080f-8129-465e-9327-b2f922666ba5')
     def test_snapshot_force_delete_when_snapshot_is_error_deleting(self):
-        # test force delete when status of snapshot is error_deleting
+        """Test force delete when status of snapshot is error_deleting"""
         self._create_reset_and_force_delete_temp_snapshot('error_deleting')
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
index 8048017..00b7f3a 100644
--- a/tempest/api/volume/admin/test_user_messages.py
+++ b/tempest/api/volume/admin/test_user_messages.py
@@ -22,9 +22,10 @@
 
 
 class UserMessagesTest(base.BaseVolumeAdminTest):
-    _api_version = 3
-    min_microversion = '3.3'
-    max_microversion = 'latest'
+    """Test volume messages with microversion greater than 3.2"""
+
+    volume_min_microversion = '3.3'
+    volume_max_microversion = 'latest'
 
     def _create_user_message(self):
         """Trigger a 'no valid host' situation to generate a message."""
@@ -51,6 +52,7 @@
 
     @decorators.idempotent_id('50f29e6e-f363-42e1-8ad1-f67ae7fd4d5a')
     def test_list_show_messages(self):
+        """Test listing and showing volume messages"""
         message_id = self._create_user_message()
         self.addCleanup(self.messages_client.delete_message, message_id)
 
@@ -62,6 +64,7 @@
 
     @decorators.idempotent_id('c6eb6901-cdcc-490f-b735-4fe251842aed')
     def test_delete_message(self):
+        """Test deleting volume messages"""
         message_id = self._create_user_message()
         self.messages_client.delete_message(message_id)
         self.messages_client.wait_for_resource_deletion(message_id)
diff --git a/tempest/api/volume/admin/test_volume_hosts.py b/tempest/api/volume/admin/test_volume_hosts.py
index 83c27e1..e4e15c5 100644
--- a/tempest/api/volume/admin/test_volume_hosts.py
+++ b/tempest/api/volume/admin/test_volume_hosts.py
@@ -18,9 +18,11 @@
 
 
 class VolumeHostsAdminTestsJSON(base.BaseVolumeAdminTest):
+    """Test fetching volume hosts info by admin users"""
 
     @decorators.idempotent_id('d5f3efa2-6684-4190-9ced-1c2f526352ad')
     def test_list_hosts(self):
+        """Test listing volume hosts"""
         hosts = self.admin_hosts_client.list_hosts()['hosts']
         self.assertGreaterEqual(len(hosts), 2,
                                 "The count of volume hosts is < 2, "
@@ -28,6 +30,7 @@
 
     @decorators.idempotent_id('21168d57-b373-4b71-a3ac-f2c88f0c5d31')
     def test_show_host(self):
+        """Test getting volume host details"""
         hosts = self.admin_hosts_client.list_hosts()['hosts']
         self.assertGreaterEqual(len(hosts), 2,
                                 "The count of volume hosts is < 2, "
diff --git a/tempest/api/volume/admin/test_volume_manage.py b/tempest/api/volume/admin/test_volume_manage.py
index 4b352e0..1e4e7cb 100644
--- a/tempest/api/volume/admin/test_volume_manage.py
+++ b/tempest/api/volume/admin/test_volume_manage.py
@@ -24,6 +24,7 @@
 
 
 class VolumeManageAdminTest(base.BaseVolumeAdminTest):
+    """Test volume manage by admin users"""
 
     @classmethod
     def skip_checks(cls):
@@ -39,6 +40,7 @@
 
     @decorators.idempotent_id('70076c71-0ce1-4208-a8ff-36a66e65cc1e')
     def test_unmanage_manage_volume(self):
+        """Test unmanaging and managing volume"""
         # Create original volume
         org_vol_id = self.create_volume()['id']
         org_vol_info = self.admin_volume_client.show_volume(
diff --git a/tempest/api/volume/admin/test_volume_pools.py b/tempest/api/volume/admin/test_volume_pools.py
index 744bc01..9424994 100644
--- a/tempest/api/volume/admin/test_volume_pools.py
+++ b/tempest/api/volume/admin/test_volume_pools.py
@@ -21,6 +21,8 @@
 
 
 class VolumePoolsAdminTestsJSON(base.BaseVolumeAdminTest):
+    """Test getting volume pools by admin users"""
+
     def _assert_pools(self, with_detail=False):
         cinder_pools = self.admin_scheduler_stats_client.list_pools(
             detail=with_detail)['pools']
@@ -33,8 +35,10 @@
 
     @decorators.idempotent_id('0248a46c-e226-4933-be10-ad6fca8227e7')
     def test_get_pools_without_details(self):
+        """Test getting volume pools without detail"""
         self._assert_pools()
 
     @decorators.idempotent_id('d4bb61f7-762d-4437-b8a4-5785759a0ced')
     def test_get_pools_with_details(self):
+        """Test getting volume pools with detail"""
         self._assert_pools(with_detail=True)
diff --git a/tempest/api/volume/admin/test_volume_quota_classes.py b/tempest/api/volume/admin/test_volume_quota_classes.py
index ee52354..f482788 100644
--- a/tempest/api/volume/admin/test_volume_quota_classes.py
+++ b/tempest/api/volume/admin/test_volume_quota_classes.py
@@ -30,6 +30,7 @@
 
 
 class VolumeQuotaClassesTest(base.BaseVolumeAdminTest):
+    """Test volume quota classes"""
 
     def setUp(self):
         # Note(jeremy.zhang): All test cases in this class need to externally
@@ -44,6 +45,7 @@
 
     @decorators.idempotent_id('abb9198e-67d0-4b09-859f-4f4a1418f176')
     def test_show_default_quota(self):
+        """Test showing default volume quota class set"""
         # response body is validated by schema
         default_quotas = self.admin_quota_classes_client.show_quota_class_set(
             'default')['quota_class_set']
@@ -51,6 +53,11 @@
 
     @decorators.idempotent_id('a7644c63-2669-467a-b00e-452dd5c5397b')
     def test_update_default_quota(self):
+        """Test updating default volume quota class set
+
+        Check current project and new project's default quota are updated
+        to the provided one.
+        """
         LOG.debug("Get the current default quota class values")
         body = self.admin_quota_classes_client.show_quota_class_set(
             'default')['quota_class_set']
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index b073604..6b58189 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -22,6 +22,8 @@
 
 
 class VolumeQuotasAdminTestJSON(base.BaseVolumeAdminTest):
+    """Test volume quotas with admin privilege"""
+
     credentials = ['primary', 'alt', 'admin']
 
     def setUp(self):
@@ -54,17 +56,19 @@
 
     @decorators.idempotent_id('59eada70-403c-4cef-a2a3-a8ce2f1b07a0')
     def test_list_quotas(self):
+        """Test showing volume quota set"""
         # Check response schema
         self.admin_quotas_client.show_quota_set(self.demo_tenant_id)
 
     @decorators.idempotent_id('2be020a2-5fdd-423d-8d35-a7ffbc36e9f7')
     def test_list_default_quotas(self):
+        """Test showing volume default quota set"""
         # Check response schema
         self.admin_quotas_client.show_default_quota_set(self.demo_tenant_id)
 
     @decorators.idempotent_id('3d45c99e-cc42-4424-a56e-5cbd212b63a6')
     def test_update_all_quota_resources_for_tenant(self):
-        # Admin can update all the resource quota limits for a tenant
+        """Test admin can update all the volume quota limits for a project"""
         new_quota_set = {'gigabytes': 1009,
                          'volumes': 11,
                          'snapshots': 11,
@@ -83,18 +87,18 @@
         # test that the specific values we set are actually in
         # the final result. There is nothing here that ensures there
         # would be no other values in there.
-        self.assertDictContainsSubset(new_quota_set, quota_set)
+        self.assertLessEqual(new_quota_set.items(), quota_set.items())
 
     @decorators.idempotent_id('18c51ae9-cb03-48fc-b234-14a19374dbed')
     def test_show_quota_usage(self):
+        """Test showing volume quota usage"""
         # Check response schema
         self.admin_quotas_client.show_quota_set(
             self.os_admin.credentials.tenant_id, params={'usage': True})
 
     @decorators.idempotent_id('874b35a9-51f1-4258-bec5-cd561b6690d3')
     def test_delete_quota(self):
-        # Admin can delete the resource quota set for a project
-
+        """Test admin can delete the volume quota set for a project"""
         self.addCleanup(self.admin_quotas_client.update_quota_set,
                         self.demo_tenant_id, **self.cleanup_quota_set)
 
@@ -112,6 +116,7 @@
 
     @decorators.idempotent_id('ae8b6091-48ad-4bfa-a188-bbf5cc02115f')
     def test_quota_usage(self):
+        """Test volume quota usage is updated after creating volume"""
         quota_usage = self.admin_quotas_client.show_quota_set(
             self.demo_tenant_id, params={'usage': True})['quota_set']
 
@@ -131,6 +136,7 @@
 
     @decorators.idempotent_id('8911036f-9d54-4720-80cc-a1c9796a8805')
     def test_quota_usage_after_volume_transfer(self):
+        """Test volume quota usage is updated after transferring volume"""
         # Create a volume for transfer
         volume = self.create_volume()
         self.addCleanup(self.delete_volume,
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index 5c7ab15..937d28b 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -24,6 +24,7 @@
 
 
 class VolumeQuotasNegativeTestJSON(base.BaseVolumeAdminTest):
+    """Negative tests of volume quotas"""
 
     @classmethod
     def setup_credentials(cls):
@@ -52,6 +53,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('bf544854-d62a-47f2-a681-90f7a47d86b6')
     def test_quota_volumes(self):
+        """Creating more volume than allowed quota will fail"""
         self.admin_quotas_client.update_quota_set(self.demo_tenant_id,
                                                   volumes=1, gigabytes=-1)
         self.assertRaises(lib_exc.OverLimit,
@@ -61,6 +63,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('2dc27eee-8659-4298-b900-169d71a91374')
     def test_quota_volume_gigabytes(self):
+        """Creating volume with size larger than allowed quota will fail"""
         self.admin_quotas_client.update_quota_set(
             self.demo_tenant_id, gigabytes=CONF.volume.volume_size, volumes=-1)
         self.assertRaises(lib_exc.OverLimit,
@@ -70,6 +73,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d321dc21-d8c6-401f-95fe-49f4845f1a6d')
     def test_volume_extend_gigabytes_quota_deviation(self):
+        """Extending volume with size larger than allowed quota will fail"""
         self.admin_quotas_client.update_quota_set(
             self.demo_tenant_id, gigabytes=CONF.volume.volume_size)
         self.assertRaises(lib_exc.OverLimit,
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 18e0b9b..4a3f494 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -13,6 +13,7 @@
 import abc
 
 from oslo_log import log as logging
+import testtools
 
 from tempest.api.volume import base
 from tempest.common import waiters
@@ -88,6 +89,7 @@
 
 
 class VolumeRetypeWithMigrationTest(VolumeRetypeTest):
+    """Test volume retype with migration"""
 
     @classmethod
     def skip_checks(cls):
@@ -134,11 +136,27 @@
 
     @decorators.idempotent_id('a1a41f3f-9dad-493e-9f09-3ff197d477cd')
     def test_available_volume_retype_with_migration(self):
+        """Test volume retype with migration
+
+        1. Create volume1 with volume_type1
+        2. Retype volume1 to volume_type2 with migration_policy='on-demand'
+        3. Check volume1's volume_type is changed to volume_type2, and
+           'os-vol-host-attr:host' in the volume info is changed.
+        """
         src_vol = self.create_volume(volume_type=self.src_vol_type['name'])
         self._retype_volume(src_vol, migration_policy='on-demand')
 
     @decorators.idempotent_id('d0d9554f-e7a5-4104-8973-f35b27ccb60d')
+    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+                          "Cinder volume snapshots are disabled.")
     def test_volume_from_snapshot_retype_with_migration(self):
+        """Test volume created from snapshot retype with migration
+
+        1. Create volume1 from snapshot with volume_type1
+        2. Retype volume1 to volume_type2 with migration_policy='on-demand'
+        3. Check volume1's volume_type is changed to volume_type2, and
+           'os-vol-host-attr:host' in the volume info is changed.
+        """
         src_vol = self._create_volume_from_snapshot()
 
         # Migrate the volume from snapshot to the second backend
@@ -146,6 +164,7 @@
 
 
 class VolumeRetypeWithoutMigrationTest(VolumeRetypeTest):
+    """Test volume retype without migration"""
 
     @classmethod
     def resource_setup(cls):
@@ -174,6 +193,13 @@
 
     @decorators.idempotent_id('b90412ee-465d-46e9-b249-ec84a47d5f25')
     def test_available_volume_retype(self):
+        """Test volume retype without migration
+
+        1. Create volume1 with volume_type1
+        2. Retype volume1 to volume_type2 with migration_policy='never'
+        3. Check volume1's volume_type is changed to volume_type2, and
+           'os-vol-host-attr:host' in the volume info is not changed.
+        """
         src_vol = self.create_volume(volume_type=self.src_vol_type['name'])
 
         # Retype the volume from snapshot
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
index 293af81..1d12a73 100644
--- a/tempest/api/volume/admin/test_volume_services.py
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -39,12 +39,14 @@
 
     @decorators.idempotent_id('e0218299-0a59-4f43-8b2b-f1c035b3d26d')
     def test_list_services(self):
+        """Test listing volume services"""
         services = (self.admin_volume_services_client.list_services()
                     ['services'])
         self.assertNotEmpty(services)
 
     @decorators.idempotent_id('63a3e1ca-37ee-4983-826d-83276a370d25')
     def test_get_service_by_service_binary_name(self):
+        """Test getting volume service by binary name"""
         services = (self.admin_volume_services_client.list_services(
             binary=self.binary_name)['services'])
         self.assertNotEmpty(services)
@@ -53,6 +55,7 @@
 
     @decorators.idempotent_id('178710e4-7596-4e08-9333-745cb8bc4f8d')
     def test_get_service_by_host_name(self):
+        """Test getting volume service by service host name"""
         services_on_host = [service for service in self.services if
                             _get_host(service['host']) == self.host_name]
 
@@ -69,6 +72,7 @@
 
     @decorators.idempotent_id('67ec6902-f91d-4dec-91fa-338523208bbc')
     def test_get_service_by_volume_host_name(self):
+        """Test getting volume service by volume host name"""
         volume_id = self.create_volume()['id']
         volume = self.admin_volume_client.show_volume(volume_id)['volume']
         hostname = _get_host(volume['os-vol-host-attr:host'])
@@ -83,7 +87,7 @@
 
     @decorators.idempotent_id('ffa6167c-4497-4944-a464-226bbdb53908')
     def test_get_service_by_service_and_host_name(self):
-
+        """Test getting volume service by binary name and host name"""
         services = (self.admin_volume_services_client.list_services(
             host=self.host_name, binary=self.binary_name))['services']
 
diff --git a/tempest/api/volume/admin/test_volume_services_negative.py b/tempest/api/volume/admin/test_volume_services_negative.py
index 3a863a1..bf39be5 100644
--- a/tempest/api/volume/admin/test_volume_services_negative.py
+++ b/tempest/api/volume/admin/test_volume_services_negative.py
@@ -19,6 +19,7 @@
 
 
 class VolumeServicesNegativeTest(base.BaseVolumeAdminTest):
+    """Negative tests of volume services"""
 
     @classmethod
     def resource_setup(cls):
@@ -30,6 +31,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('3246ce65-ba70-4159-aa3b-082c28e4b484')
     def test_enable_service_with_invalid_host(self):
+        """Test enabling volume service with invalid host should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.admin_volume_services_client.enable_service,
                           host='invalid_host', binary=self.binary)
@@ -37,6 +39,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('c571f179-c6e6-4c50-a0ab-368b628a8ac1')
     def test_disable_service_with_invalid_binary(self):
+        """Test disabling volume service with invalid binary should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.admin_volume_services_client.disable_service,
                           host=self.host, binary='invalid_binary')
@@ -44,6 +47,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('77767b36-5e8f-4c68-a0b5-2308cc21ec64')
     def test_disable_log_reason_with_no_reason(self):
+        """Test disabling volume service with none reason should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.admin_volume_services_client.disable_log_reason,
                           host=self.host, binary=self.binary,
@@ -52,6 +56,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('712bfab8-1f44-4eb5-a632-fa70bf78f05e')
     def test_freeze_host_with_invalid_host(self):
+        """Test freezing volume service with invalid host should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.admin_volume_services_client.freeze_host,
                           host='invalid_host')
@@ -59,6 +64,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('7c6287c9-d655-47e1-9a11-76f6657a6dce')
     def test_thaw_host_with_invalid_host(self):
+        """Test thawing volume service with invalid host should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.admin_volume_services_client.thaw_host,
                           host='invalid_host')
diff --git a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
index ff5e7e2..10fd485 100644
--- a/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py
@@ -24,6 +24,7 @@
 
 
 class VolumeSnapshotQuotasNegativeTestJSON(base.BaseVolumeAdminTest):
+    """Negative tests of volume snapshot quotas"""
 
     @classmethod
     def skip_checks(cls):
@@ -67,6 +68,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('02bbf63f-6c05-4357-9d98-2926a94064ff')
     def test_quota_volume_snapshots(self):
+        """Test creating snapshot exceeding snapshots quota should fail"""
         self.assertRaises(lib_exc.OverLimit,
                           self.snapshots_client.create_snapshot,
                           volume_id=self.volume['id'])
@@ -74,6 +76,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('c99a1ca9-6cdf-498d-9fdf-25832babef27')
     def test_quota_volume_gigabytes_snapshots(self):
+        """Test creating snapshot exceeding gigabytes quota should fail"""
         self.addCleanup(self.admin_quotas_client.update_quota_set,
                         self.demo_tenant_id,
                         **self.shared_quota_set)
diff --git a/tempest/api/volume/admin/test_volume_type_access.py b/tempest/api/volume/admin/test_volume_type_access.py
index b64face..55ec428 100644
--- a/tempest/api/volume/admin/test_volume_type_access.py
+++ b/tempest/api/volume/admin/test_volume_type_access.py
@@ -24,11 +24,13 @@
 
 
 class VolumeTypesAccessTest(base.BaseVolumeAdminTest):
+    """Test volume type access"""
 
     credentials = ['primary', 'alt', 'admin']
 
     @decorators.idempotent_id('d4dd0027-835f-4554-a6e5-50903fb79184')
     def test_volume_type_access_add(self):
+        """Test adding volume type access for non-admin project"""
         # Creating a NON public volume type
         params = {'os-volume-type-access:is_public': False}
         volume_type = self.create_volume_type(**params)
@@ -52,6 +54,7 @@
 
     @decorators.idempotent_id('5220eb28-a435-43ce-baaf-ed46f0e95159')
     def test_volume_type_access_list(self):
+        """Test listing volume type access"""
         # Creating a NON public volume type
         params = {'os-volume-type-access:is_public': False}
         volume_type = self.create_volume_type(**params)
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index c1ceeb7..98ae83b 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -23,17 +23,18 @@
 
 
 class VolumeTypesTest(base.BaseVolumeAdminTest):
+    """Test volume types"""
 
     @decorators.idempotent_id('9d9b28e3-1b2e-4483-a2cc-24aa0ea1de54')
     def test_volume_type_list(self):
-        # List volume types.
+        """Test listing volume types"""
         body = \
             self.admin_volume_types_client.list_volume_types()['volume_types']
         self.assertIsInstance(body, list)
 
     @decorators.idempotent_id('c03cc62c-f4e9-4623-91ec-64ce2f9c1260')
     def test_volume_crud_with_volume_type_and_extra_specs(self):
-        # Create/update/get/delete volume with volume_type and extra spec.
+        """Test create/update/get/delete volume with volume_type"""
         volume_types = list()
         vol_name = data_utils.rand_name(self.__class__.__name__ + '-volume')
         proto = CONF.volume.storage_protocol
@@ -80,7 +81,7 @@
 
     @decorators.idempotent_id('4e955c3b-49db-4515-9590-0c99f8e471ad')
     def test_volume_type_create_get_delete(self):
-        # Create/get volume type.
+        """Test create/get/delete volume type"""
         name = data_utils.rand_name(self.__class__.__name__ + '-volume-type')
         description = data_utils.rand_name("volume-type-description")
         proto = CONF.volume.storage_protocol
@@ -118,8 +119,10 @@
 
     @decorators.idempotent_id('7830abd0-ff99-4793-a265-405684a54d46')
     def test_volume_type_encryption_create_get_update_delete(self):
-        # Create/get/update/delete encryption type.
+        """Test create/get/update/delete volume encryption type"""
         create_kwargs = {'provider': 'LuksEncryptor',
+                         'key_size': 256,
+                         'cipher': 'aes-xts-plain64',
                          'control_location': 'front-end'}
         volume_type_id = self.create_volume_type()['id']
 
@@ -127,7 +130,6 @@
         encryption_type = \
             self.admin_encryption_types_client.create_encryption_type(
                 volume_type_id, **create_kwargs)['encryption']
-        self.assertIn('volume_type_id', encryption_type)
         for key in create_kwargs:
             self.assertEqual(create_kwargs[key], encryption_type[key],
                              'The created encryption_type %s is different '
@@ -176,6 +178,7 @@
 
     @decorators.idempotent_id('cf9f07c6-db9e-4462-a243-5933ad65e9c8')
     def test_volume_type_update(self):
+        """Test updating volume type details"""
         # Create volume type
         volume_type = self.create_volume_type()
 
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index 730acdf..852aa93 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -19,6 +19,7 @@
 
 
 class VolumeTypesExtraSpecsTest(base.BaseVolumeAdminTest):
+    """Test volume type extra specs"""
 
     @classmethod
     def resource_setup(cls):
@@ -27,7 +28,7 @@
 
     @decorators.idempotent_id('b42923e9-0452-4945-be5b-d362ae533e60')
     def test_volume_type_extra_specs_list(self):
-        # List Volume types extra specs.
+        """Test listing volume type extra specs"""
         extra_specs = {"spec1": "val1"}
         body = self.admin_volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'], extra_specs)['extra_specs']
@@ -40,7 +41,7 @@
 
     @decorators.idempotent_id('0806db36-b4a0-47a1-b6f3-c2e7f194d017')
     def test_volume_type_extra_specs_update(self):
-        # Update volume type extra specs
+        """Test updating volume type extra specs"""
         extra_specs = {"spec2": "val1"}
         body = self.admin_volume_types_client.create_volume_type_extra_specs(
             self.volume_type['id'], extra_specs)['extra_specs']
@@ -74,7 +75,7 @@
 
     @decorators.idempotent_id('d4772798-601f-408a-b2a5-29e8a59d1220')
     def test_volume_type_extra_spec_create_get_delete(self):
-        # Create/Get/Delete volume type extra spec.
+        """Test Create/Get/Delete volume type extra specs"""
         spec_key = "spec3"
         extra_specs = {spec_key: "val1"}
         body = self.admin_volume_types_client.create_volume_type_extra_specs(
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index fe249d6..70a62ff 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -20,6 +20,7 @@
 
 
 class ExtraSpecsNegativeTest(base.BaseVolumeAdminTest):
+    """Negative tests of volume type extra specs"""
 
     @classmethod
     def resource_setup(cls):
@@ -30,7 +31,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('08961d20-5cbb-4910-ac0f-89ad6dbb2da1')
     def test_update_no_body(self):
-        # Should not update volume type extra specs with no body
+        """Test updating volume type extra specs with no body should fail"""
         self.assertRaises(
             lib_exc.BadRequest,
             self.admin_volume_types_client.update_volume_type_extra_specs,
@@ -39,7 +40,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('25e5a0ee-89b3-4c53-8310-236f76c75365')
     def test_update_nonexistent_extra_spec_id(self):
-        # Should not update volume type extra specs with nonexistent id.
+        """Test updating volume type extra specs with non existent name
+
+        Updating volume type extra specs with non existent extra spec name
+        should fail.
+        """
         extra_spec = {"spec1": "val2"}
         self.assertRaises(
             lib_exc.BadRequest,
@@ -50,7 +55,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9bf7a657-b011-4aec-866d-81c496fbe5c8')
     def test_update_none_extra_spec_id(self):
-        # Should not update volume type extra specs with none id.
+        """Test updating volume type extra specs without name
+
+        Updating volume type extra specs without extra spec name should fail.
+        """
         extra_spec = {"spec1": "val2"}
         self.assertRaises(
             lib_exc.BadRequest,
@@ -60,8 +68,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a77dfda2-9100-448e-9076-ed1711f4bdfc')
     def test_update_multiple_extra_spec(self):
-        # Should not update volume type extra specs with multiple specs as
-        # body.
+        """Test updating multiple volume type extra specs should fail"""
         extra_spec = {"spec1": "val2", "spec2": "val1"}
         self.assertRaises(
             lib_exc.BadRequest,
@@ -70,10 +77,28 @@
             extra_spec)
 
     @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('474090d2-0824-eb3b-9335-f506b4aa49d8')
+    def test_update_nonexistent_type_id(self):
+        """Test update volume type extra specs for non existent volume type
+
+        Update volume type extra specs for non existent volume type should
+        fail.
+        """
+        spec_key = "spec1"
+        extra_spec = {spec_key: "val5"}
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.admin_volume_types_client.update_volume_type_extra_specs,
+            data_utils.rand_uuid(), spec_key, extra_spec)
+
+    @decorators.attr(type=['negative'])
     @decorators.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
     def test_create_nonexistent_type_id(self):
-        # Should not create volume type extra spec for nonexistent volume
-        # type id.
+        """Test creating volume type extra specs for non existent volume type
+
+        Creating volume type extra specs for non existent volume type should
+        fail.
+        """
         extra_specs = {"spec2": "val1"}
         self.assertRaises(
             lib_exc.NotFound,
@@ -83,7 +108,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c821bdc8-43a4-4bf4-86c8-82f3858d5f7d')
     def test_create_none_body(self):
-        # Should not create volume type extra spec for none POST body.
+        """Test creating volume type extra spec with none POST body
+
+        Creating volume type extra spec with none POST body should fail.
+        """
         self.assertRaises(
             lib_exc.BadRequest,
             self.admin_volume_types_client.create_volume_type_extra_specs,
@@ -92,7 +120,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('bc772c71-1ed4-4716-b945-8b5ed0f15e87')
     def test_create_invalid_body(self):
-        # Should not create volume type extra spec for invalid POST body.
+        """Test creating volume type extra spec with invalid POST body
+
+        Creating volume type extra spec with invalid POST body should fail.
+        """
         self.assertRaises(
             lib_exc.BadRequest,
             self.admin_volume_types_client.create_volume_type_extra_specs,
@@ -101,8 +132,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
     def test_delete_nonexistent_volume_type_id(self):
-        # Should not delete volume type extra spec for nonexistent
-        # type id.
+        """Test deleting volume type extra spec for non existent volume type
+
+        Deleting volume type extra spec for non existent volume type should
+        fail.
+        """
         self.assertRaises(
             lib_exc.NotFound,
             self.admin_volume_types_client.delete_volume_type_extra_specs,
@@ -111,7 +145,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('dee5cf0c-cdd6-4353-b70c-e847050d71fb')
     def test_list_nonexistent_volume_type_id(self):
-        # Should not list volume type extra spec for nonexistent type id.
+        """Test listing volume type extra spec for non existent volume type
+
+        Listing volume type extra spec for non existent volume type should
+        fail.
+        """
         self.assertRaises(
             lib_exc.NotFound,
             self.admin_volume_types_client.list_volume_types_extra_specs,
@@ -120,7 +158,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9f402cbd-1838-4eb4-9554-126a6b1908c9')
     def test_get_nonexistent_volume_type_id(self):
-        # Should not get volume type extra spec for nonexistent type id.
+        """Test getting volume type extra spec for non existent volume type
+
+        Getting volume type extra spec for non existent volume type should
+        fail.
+        """
         self.assertRaises(
             lib_exc.NotFound,
             self.admin_volume_types_client.show_volume_type_extra_specs,
@@ -129,8 +171,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c881797d-12ff-4f1a-b09d-9f6212159753')
     def test_get_nonexistent_extra_spec_name(self):
-        # Should not get volume type extra spec for nonexistent extra spec
-        # name.
+        """Test getting volume type extra spec for non existent spec name
+
+        Getting volume type extra spec for non existent extra spec name should
+        fail.
+        """
         self.assertRaises(
             lib_exc.NotFound,
             self.admin_volume_types_client.show_volume_type_extra_specs,
diff --git a/tempest/api/volume/admin/test_volume_types_negative.py b/tempest/api/volume/admin/test_volume_types_negative.py
index ae29049..f37c427 100644
--- a/tempest/api/volume/admin/test_volume_types_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_negative.py
@@ -20,11 +20,12 @@
 
 
 class VolumeTypesNegativeTest(base.BaseVolumeAdminTest):
+    """Negative tests of volume type"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('878b4e57-faa2-4659-b0d1-ce740a06ae81')
     def test_create_with_empty_name(self):
-        # Should not be able to create volume type with an empty name.
+        """Test creating volume type with an empty name will fail"""
         self.assertRaises(
             lib_exc.BadRequest,
             self.admin_volume_types_client.create_volume_type, name='')
@@ -32,7 +33,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('994610d6-0476-4018-a644-a2602ef5d4aa')
     def test_get_nonexistent_type_id(self):
-        # Should not be able to get volume type with nonexistent type id.
+        """Test getting volume type with nonexistent type id will fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.admin_volume_types_client.show_volume_type,
                           data_utils.rand_uuid())
@@ -40,7 +41,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6b3926d2-7d73-4896-bc3d-e42dfd11a9f6')
     def test_delete_nonexistent_type_id(self):
-        # Should not be able to delete volume type with nonexistent type id.
+        """Test deleting volume type with nonexistent type id will fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.admin_volume_types_client.delete_volume_type,
                           data_utils.rand_uuid())
@@ -48,8 +49,33 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8c09f849-f225-4d78-ba87-bffd9a5e0c6f')
     def test_create_volume_with_private_volume_type(self):
-        # Should not be able to create volume with private volume type.
+        """Test creating volume with private volume type will fail"""
         params = {'os-volume-type-access:is_public': False}
         volume_type = self.create_volume_type(**params)
         self.assertRaises(lib_exc.NotFound,
                           self.create_volume, volume_type=volume_type['id'])
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('a5924b5f-b6c1-49ba-994c-b4af55d26e52')
+    def test_create_volume_type_encryption_nonexistent_type_id(self):
+        """Test create encryption with nonexistent type id will fail"""
+        create_kwargs = {
+            'type_id': data_utils.rand_uuid(),
+            'provider': 'LuksEncryptor',
+            'key_size': 256,
+            'cipher': 'aes-xts-plain64',
+            'control_location': 'front-end'
+            }
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.create_encryption_type, **create_kwargs)
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('969b10c7-3d77-4e1b-a4f2-2d265980f7e5')
+    def test_create_with_repeated_name(self):
+        """Test creating volume type with a repeated name will fail"""
+        volume_type_name = self.create_volume_type()['name']
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.admin_volume_types_client.create_volume_type,
+            name=volume_type_name)
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 3e0deef..ecddfba 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -23,6 +23,9 @@
 
 
 class VolumesActionsTest(base.BaseVolumeAdminTest):
+    """Test volume actions"""
+
+    create_default_network = True
 
     def _create_reset_and_force_delete_temp_volume(self, status=None):
         # Create volume, reset volume status, and force delete temp volume
@@ -37,7 +40,10 @@
 
     @decorators.idempotent_id('d063f96e-a2e0-4f34-8b8a-395c42de1845')
     def test_volume_reset_status(self):
-        # test volume reset status : available->error->available->maintenance
+        """Test resetting volume status
+
+        Reset volume status to available->error->available->maintenance
+        """
         volume = self.create_volume()
         self.addCleanup(waiters.wait_for_volume_resource_status,
                         self.volumes_client, volume['id'], 'available')
@@ -51,27 +57,28 @@
 
     @decorators.idempotent_id('21737d5a-92f2-46d7-b009-a0cc0ee7a570')
     def test_volume_force_delete_when_volume_is_creating(self):
-        # test force delete when status of volume is creating
+        """Test force deleting volume when its status is creating"""
         self._create_reset_and_force_delete_temp_volume('creating')
 
     @decorators.idempotent_id('db8d607a-aa2e-4beb-b51d-d4005c232011')
     def test_volume_force_delete_when_volume_is_attaching(self):
-        # test force delete when status of volume is attaching
+        """Test force deleting volume when its status is attaching"""
         self._create_reset_and_force_delete_temp_volume('attaching')
 
     @decorators.idempotent_id('3e33a8a8-afd4-4d64-a86b-c27a185c5a4a')
     def test_volume_force_delete_when_volume_is_error(self):
-        # test force delete when status of volume is error
+        """Test force deleting volume when its status is error"""
         self._create_reset_and_force_delete_temp_volume('error')
 
     @decorators.idempotent_id('b957cabd-1486-4e21-90cf-a9ed3c39dfb2')
     def test_volume_force_delete_when_volume_is_maintenance(self):
-        # test force delete when status of volume is maintenance
+        """Test force deleting volume when its status is maintenance"""
         self._create_reset_and_force_delete_temp_volume('maintenance')
 
     @decorators.idempotent_id('d38285d9-929d-478f-96a5-00e66a115b81')
     @utils.services('compute')
     def test_force_detach_volume(self):
+        """Test force detaching volume when its status is error"""
         # Create a server and a volume
         server_id = self.create_server()['id']
         volume_id = self.create_volume()['id']
@@ -101,5 +108,4 @@
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume_id, 'available')
         vol_info = self.volumes_client.show_volume(volume_id)['volume']
-        self.assertIn('attachments', vol_info)
         self.assertEmpty(vol_info['attachments'])
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 45060d0..835cc1d 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -26,6 +26,7 @@
 
 
 class VolumesBackupsAdminTest(base.BaseVolumeAdminTest):
+    """Test volume backups"""
 
     @classmethod
     def skip_checks(cls):
@@ -67,11 +68,8 @@
         # Export Backup
         export_backup = (self.admin_backups_client.export_backup(backup['id'])
                          ['backup-record'])
-        self.assertIn('backup_service', export_backup)
-        self.assertIn('backup_url', export_backup)
         self.assertTrue(export_backup['backup_service'].startswith(
                         'cinder.backup.drivers'))
-        self.assertIsNotNone(export_backup['backup_url'])
 
         # NOTE(geguileo): Backups are imported with the same backup id
         # (important for incremental backups among other things), so we cannot
@@ -92,7 +90,6 @@
         # deletions will delete data from the backup back-end because they
         # were both pointing to the same backend data.
         self.addCleanup(self._delete_backup, new_id)
-        self.assertIn("id", import_backup)
         self.assertEqual(new_id, import_backup['id'])
         waiters.wait_for_volume_resource_status(self.admin_backups_client,
                                                 import_backup['id'],
@@ -122,6 +119,7 @@
 
     @decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
     def test_volume_backup_reset_status(self):
+        """Test resetting volume backup status to error"""
         # Create a volume
         volume = self.create_volume()
         # Create a backup
diff --git a/tempest/api/volume/admin/test_volumes_list.py b/tempest/api/volume/admin/test_volumes_list.py
index 6ce4a85..c3229f0 100644
--- a/tempest/api/volume/admin/test_volumes_list.py
+++ b/tempest/api/volume/admin/test_volumes_list.py
@@ -24,6 +24,7 @@
 
 
 class VolumesListAdminTestJSON(base.BaseVolumeAdminTest):
+    """Test listing volumes with admin privilege"""
 
     @classmethod
     def resource_setup(cls):
@@ -41,7 +42,7 @@
 
     @decorators.idempotent_id('5866286f-3290-4cfd-a414-088aa6cdc469')
     def test_volume_list_param_tenant(self):
-        # Test to list volumes from single tenant
+        """Test admin can list volumes belonging to specified project"""
         # Create a volume in admin tenant
         adm_vol = self.admin_volume_client.create_volume(
             size=CONF.volume.volume_size)['volume']
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 1bfd075..6e34dd6 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -13,14 +13,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.api.volume import api_microversion_fixture
 from tempest.common import compute
 from tempest.common import waiters
 from tempest import config
+from tempest.lib.common import api_microversion_fixture
 from tempest.lib.common import api_version_utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
-from tempest.lib import exceptions
 import tempest.test
 
 CONF = config.CONF
@@ -30,12 +29,9 @@
                      tempest.test.BaseTestCase):
     """Base test case class for all Cinder API tests."""
 
-    _api_version = 2
-    # if api_v2 is not enabled while api_v3 is enabled, the volume v2 classes
-    # should be transferred to volume v3 classes.
-    if (not CONF.volume_feature_enabled.api_v2 and
-        CONF.volume_feature_enabled.api_v3):
-        _api_version = 3
+    # Set this to True in subclasses to create a default network. See
+    # https://bugs.launchpad.net/tempest/+bug/1844568
+    create_default_network = False
     credentials = ['primary']
 
     @classmethod
@@ -45,25 +41,16 @@
         if not CONF.service_available.cinder:
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
-        if cls._api_version == 2:
-            if not CONF.volume_feature_enabled.api_v2:
-                msg = "Volume API v2 is disabled"
-                raise cls.skipException(msg)
-        elif cls._api_version == 3:
-            if not CONF.volume_feature_enabled.api_v3:
-                msg = "Volume API v3 is disabled"
-                raise cls.skipException(msg)
-        else:
-            msg = ("Invalid Cinder API version (%s)" % cls._api_version)
-            raise exceptions.InvalidConfiguration(msg)
 
         api_version_utils.check_skip_with_microversion(
-            cls.min_microversion, cls.max_microversion,
+            cls.volume_min_microversion, cls.volume_max_microversion,
             CONF.volume.min_microversion, CONF.volume.max_microversion)
 
     @classmethod
     def setup_credentials(cls):
-        cls.set_network_resources()
+        cls.set_network_resources(
+            network=cls.create_default_network,
+            subnet=cls.create_default_network)
         super(BaseVolumeTest, cls).setup_credentials()
 
     @classmethod
@@ -91,15 +78,20 @@
     def setUp(self):
         super(BaseVolumeTest, self).setUp()
         self.useFixture(api_microversion_fixture.APIMicroversionFixture(
-            self.request_microversion))
+            compute_microversion=self.compute_request_microversion,
+            volume_microversion=self.volume_request_microversion))
 
     @classmethod
     def resource_setup(cls):
         super(BaseVolumeTest, cls).resource_setup()
-        cls.request_microversion = (
+        cls.volume_request_microversion = (
+            api_version_utils.select_request_microversion(
+                cls.volume_min_microversion,
+                CONF.volume.min_microversion))
+        cls.compute_request_microversion = (
             api_version_utils.select_request_microversion(
                 cls.min_microversion,
-                CONF.volume.min_microversion))
+                CONF.compute.min_microversion))
 
         cls.image_ref = CONF.compute.image_ref
         cls.flavor_ref = CONF.compute.flavor_ref
@@ -124,6 +116,13 @@
             name = data_utils.rand_name(cls.__name__ + '-Volume')
             kwargs['name'] = name
 
+        if CONF.volume.volume_type and 'volume_type' not in kwargs:
+            # Use the volume_type configured in CONF only when the caller
+            # (e.g. a child class) has not already passed one explicitly;
+            # an explicit volume_type must not be overridden.
+            kwargs['volume_type'] = CONF.volume.volume_type
+
         if CONF.compute.compute_volume_common_az:
             kwargs.setdefault('availability_zone',
                               CONF.compute.compute_volume_common_az)
@@ -161,6 +160,10 @@
 
         backup = backup_client.create_backup(
             volume_id=volume_id, **kwargs)['backup']
+        # Cleanups run in LIFO order, so register the wait before the delete:
+        # the delete runs first and the wait for deletion runs after it.
+        self.addCleanup(backup_client.wait_for_resource_deletion,
+                        backup['id'])
         self.addCleanup(backup_client.delete_backup, backup['id'])
         waiters.wait_for_volume_resource_status(backup_client, backup['id'],
                                                 'available')
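The cleanup ordering above relies on addCleanup's LIFO semantics: the callback registered last runs first. A minimal sketch using plain unittest (illustrative only, not Tempest code) showing that ordering:

import unittest


class CleanupOrderSketch(unittest.TestCase):
    """Illustrative only: cleanups run in reverse registration order."""

    def test_lifo_order(self):
        calls = []
        # Registered first, so it runs last -- mirroring how the wait for
        # backup deletion is registered before the delete itself.
        self.addCleanup(calls.append, 'wait_for_resource_deletion')
        # Registered last, so it runs first.
        self.addCleanup(calls.append, 'delete_backup')
        self.doCleanups()
        self.assertEqual(
            ['delete_backup', 'wait_for_resource_deletion'], calls)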
@@ -228,12 +231,15 @@
         return group
 
     def delete_group(self, group_id, delete_volumes=True):
-        self.groups_client.delete_group(group_id, delete_volumes)
+        group_vols = []
         if delete_volumes:
             vols = self.volumes_client.list_volumes(detail=True)['volumes']
             for vol in vols:
                 if vol['group_id'] == group_id:
-                    self.volumes_client.wait_for_resource_deletion(vol['id'])
+                    group_vols.append(vol['id'])
+        self.groups_client.delete_group(group_id, delete_volumes)
+        for vol in group_vols:
+            self.volumes_client.wait_for_resource_deletion(vol)
         self.groups_client.wait_for_resource_deletion(group_id)
 
 
@@ -294,6 +300,27 @@
         cls.addClassResourceCleanup(cls.clear_volume_type, volume_type['id'])
         return volume_type
 
+    def create_encryption_type(self, type_id=None, provider=None,
+                               key_size=None, cipher=None,
+                               control_location=None):
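+        """Create an encryption type; uses a new volume type if none given"""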
+        if not type_id:
+            volume_type = self.create_volume_type()
+            type_id = volume_type['id']
+        self.admin_encryption_types_client.create_encryption_type(
+            type_id, provider=provider, key_size=key_size, cipher=cipher,
+            control_location=control_location)
+
+    def create_encrypted_volume(self, encryption_provider, key_size=256,
+                                cipher='aes-xts-plain64',
+                                control_location='front-end'):
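+        """Create an encrypted volume backed by a new encrypted volume type"""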
+        volume_type = self.create_volume_type()
+        self.create_encryption_type(type_id=volume_type['id'],
+                                    provider=encryption_provider,
+                                    key_size=key_size,
+                                    cipher=cipher,
+                                    control_location=control_location)
+        return self.create_volume(volume_type=volume_type['name'])
+
     def create_group_type(self, name=None, **kwargs):
         """Create a test group-type"""
         name = name or data_utils.rand_name(
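A hypothetical usage sketch for the new encryption helpers above. The class and test names below are illustrative only, and a real Tempest test would also carry an @decorators.idempotent_id:

from tempest.api.volume import base


class VolumeEncryptionSketch(base.BaseVolumeAdminTest):
    """Illustrative only: consume the new create_encrypted_volume helper."""

    def test_create_encrypted_volume(self):
        # The helper creates a fresh volume type, attaches a LUKS encryption
        # type to it (defaults: 256-bit aes-xts-plain64, front-end), then
        # creates and returns a volume of that type.
        volume = self.create_encrypted_volume('LuksEncryptor')
        self.assertIn('id', volume)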
diff --git a/tempest/api/volume/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
index 0b6ee38..39369be 100644
--- a/tempest/api/volume/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -22,7 +22,7 @@
 
     @decorators.idempotent_id('01f1ae88-eba9-4c6b-a011-6f7ace06b725')
     def test_get_availability_zone_list(self):
-        # List of availability zone
+        """Test listing volume available zones"""
         availability_zone = (
             self.availability_zone_client.list_availability_zones()
             ['availabilityZoneInfo'])
diff --git a/tempest/api/volume/test_extensions.py b/tempest/api/volume/test_extensions.py
index 39ce00c..acd9ca2 100644
--- a/tempest/api/volume/test_extensions.py
+++ b/tempest/api/volume/test_extensions.py
@@ -26,10 +26,11 @@
 
 
 class ExtensionsTestJSON(base.BaseVolumeTest):
+    """Test volume extensions"""
 
     @decorators.idempotent_id('94607eb0-43a5-47ca-82aa-736b41bd2e2c')
     def test_list_extensions(self):
-        # List of all extensions
+        """Test listing volume extensions"""
         extensions = (self.volumes_extension_client.list_extensions()
                       ['extensions'])
         if not CONF.volume_feature_enabled.api_extensions:
diff --git a/tempest/api/volume/test_image_metadata.py b/tempest/api/volume/test_image_metadata.py
index 53b3acc..8f9bbd2 100644
--- a/tempest/api/volume/test_image_metadata.py
+++ b/tempest/api/volume/test_image_metadata.py
@@ -24,6 +24,7 @@
 
 
 class VolumesImageMetadata(base.BaseVolumeTest):
+    """Test volume image metadata"""
 
     @classmethod
     def skip_checks(cls):
@@ -41,6 +42,7 @@
     @decorators.idempotent_id('03efff0b-5c75-4822-8f10-8789ac15b13e')
     @utils.services('image')
     def test_update_show_delete_image_metadata(self):
+        """Test update/show/delete volume's image metadata"""
         # Update image metadata
         image_metadata = {'image_id': '5137a025-3c5f-43c1-bc64-5f41270040a5',
                           'image_name': 'image',
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index e6fe25d..ee1b5e5 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -23,6 +23,8 @@
 
 
 class SnapshotMetadataTestJSON(base.BaseVolumeTest):
+    """Test snapshot metadata"""
+
     @classmethod
     def skip_checks(cls):
         super(SnapshotMetadataTestJSON, cls).skip_checks()
@@ -45,6 +47,7 @@
 
     @decorators.idempotent_id('a2f20f99-e363-4584-be97-bc33afb1a56c')
     def test_crud_snapshot_metadata(self):
+        """Test create/get/update/delete snapshot metadata"""
         # Create metadata for the snapshot
         metadata = {"key1": "value1",
                     "key2": "value2",
@@ -82,7 +85,7 @@
 
     @decorators.idempotent_id('e8ff85c5-8f97-477f-806a-3ac364a949ed')
     def test_update_show_snapshot_metadata_item(self):
-        # Update metadata item for the snapshot
+        """Test update/show snapshot metadata item"""
         metadata = {"key1": "value1",
                     "key2": "value2",
                     "key3": "value3"}
diff --git a/tempest/api/volume/test_versions.py b/tempest/api/volume/test_versions.py
index b602032..578be58 100644
--- a/tempest/api/volume/test_versions.py
+++ b/tempest/api/volume/test_versions.py
@@ -17,12 +17,12 @@
 
 
 class VersionsTest(base.BaseVolumeTest):
-
-    _api_version = 3
+    """Test volume versions"""
 
     @decorators.idempotent_id('77838fc4-b49b-4c64-9533-166762517369')
     @decorators.attr(type='smoke')
     def test_list_versions(self):
+        """Test listing volume versions"""
         # NOTE: The version data is checked on service client side
         #       with JSON-Schema validation. It is enough to just call
         #       the API here.
@@ -30,6 +30,7 @@
 
     @decorators.idempotent_id('7f755ae2-caa9-4049-988c-331d8f7a579f')
     def test_show_version(self):
+        """Test getting volume version details"""
         # NOTE: The version data is checked on service client side
         # with JSON-Schema validation. So we will loop through each
         # version and call show version.
diff --git a/tempest/api/volume/test_volume_absolute_limits.py b/tempest/api/volume/test_volume_absolute_limits.py
index 00a3375..ccf0804 100644
--- a/tempest/api/volume/test_volume_absolute_limits.py
+++ b/tempest/api/volume/test_volume_absolute_limits.py
@@ -23,7 +23,8 @@
 # NOTE(zhufl): This inherits from BaseVolumeAdminTest because
 # it requires force_tenant_isolation=True, which need admin
 # credentials to create non-admin users for the tests.
-class AbsoluteLimitsTests(base.BaseVolumeAdminTest):  # noqa
+class AbsoluteLimitsTests(base.BaseVolumeAdminTest):  # noqa: T115
+    """Test volume absolute limits"""
 
     # avoid existing volumes of pre-defined tenant
     force_tenant_isolation = True
@@ -43,7 +44,7 @@
 
     @decorators.idempotent_id('8e943f53-e9d6-4272-b2e9-adcf2f7c29ad')
     def test_get_volume_absolute_limits(self):
-        # get volume limit for a tenant
+        """Test getting volume absolute limits"""
         absolute_limits = \
             self.volume_limits_client.show_limits(
             )['limits']['absolute']
diff --git a/tempest/api/volume/test_volume_delete_cascade.py b/tempest/api/volume/test_volume_delete_cascade.py
index bb32c11..53f1bca 100644
--- a/tempest/api/volume/test_volume_delete_cascade.py
+++ b/tempest/api/volume/test_volume_delete_cascade.py
@@ -58,8 +58,11 @@
 
     @decorators.idempotent_id('994e2d40-de37-46e8-b328-a58fba7e4a95')
     def test_volume_delete_cascade(self):
-        # The case validates the ability to delete a volume
-        # with associated snapshots.
+        """Test deleting a volume with associated snapshots
+
+        The case validates the ability to delete a volume
+        with associated snapshots.
+        """
 
         # Create a volume
         volume = self.create_volume()
@@ -78,9 +81,12 @@
     @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
                       'Skip because of Bug#1677525')
     def test_volume_from_snapshot_cascade_delete(self):
-        # The case validates the ability to delete a volume with
-        # associated snapshot while there is another volume created
-        # from that snapshot.
+        """Test deleting a volume with associated volume-associated snapshot
+
+        The case validates the ability to delete a volume with
+        associated snapshot while there is another volume created
+        from that snapshot.
+        """
 
         # Create a volume
         volume = self.create_volume()
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index d203b2d..2151168 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -20,6 +20,7 @@
 
 
 class VolumesMetadataTest(base.BaseVolumeTest):
+    """Test volume metadata"""
 
     @classmethod
     def resource_setup(cls):
@@ -34,6 +35,7 @@
 
     @decorators.idempotent_id('6f5b125b-f664-44bf-910f-751591fe5769')
     def test_crud_volume_metadata(self):
+        """Test creating, getting, updating and deleting of volume metadata"""
         # Create metadata for the volume
         metadata = {"key1": "value1",
                     "key2": "value2",
@@ -71,6 +73,7 @@
 
     @decorators.idempotent_id('862261c5-8df4-475a-8c21-946e50e36a20')
     def test_update_show_volume_metadata_item(self):
+        """Test updating and getting single volume metadata item"""
         # Update metadata item for the volume
         metadata = {"key1": "value1",
                     "key2": "value2",
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 4cdf898..f1dec06 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -20,6 +20,7 @@
 
 
 class VolumesTransfersTest(base.BaseVolumeTest):
+    """Test volume transfer"""
 
     credentials = ['primary', 'alt', 'admin']
 
@@ -34,6 +35,7 @@
 
     @decorators.idempotent_id('4d75b645-a478-48b1-97c8-503f64242f1a')
     def test_create_get_list_accept_volume_transfer(self):
+        """Test creating, getting, listing and accepting of volume transfer"""
         # Create a volume first
         volume = self.create_volume()
         self.addCleanup(self.delete_volume,
@@ -74,6 +76,7 @@
 
     @decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
     def test_create_list_delete_volume_transfer(self):
+        """Test creating, listing and deleting volume transfer"""
         # Create a volume first
         volume = self.create_volume()
         self.addCleanup(self.delete_volume,
@@ -101,3 +104,30 @@
         self.client.delete_volume_transfer(transfer_id)
         waiters.wait_for_volume_resource_status(
             self.volumes_client, volume['id'], 'available')
+
+
+class VolumesTransfersV355Test(VolumesTransfersTest):
+    """Test volume transfer for the "new" Transfers API mv 3.55"""
+
+    volume_min_microversion = '3.55'
+    volume_max_microversion = 'latest'
+
+    credentials = ['primary', 'alt', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(VolumesTransfersV355Test, cls).setup_clients()
+        cls.client = cls.os_primary.volume_transfers_mv355_client_latest
+        cls.alt_client = cls.os_alt.volume_transfers_mv355_client_latest
+
+    @decorators.idempotent_id('9f36bb2b-619f-4507-b246-76aeb9a28851')
+    def test_create_get_list_accept_volume_transfer(self):
+        """Test create, get, list, accept with volume-transfers API mv 3.55"""
+        super(VolumesTransfersV355Test, self). \
+            test_create_get_list_accept_volume_transfer()
+
+    @decorators.idempotent_id('af4a5b97-0859-4f31-aa3c-85b05bb63322')
+    def test_create_list_delete_volume_transfer(self):
+        """Test create, list, delete with volume-transfers API mv 3.55"""
+        super(VolumesTransfersV355Test, self). \
+            test_create_list_delete_volume_transfer()
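The class above follows the microversion-pinning pattern enabled by the base-class changes in this review: a test class declares the volume microversion range it requires, and the selected version is sent with every request through tempest.lib's APIMicroversionFixture. A minimal, purely illustrative template (class name, range and test body are placeholders; a real test would also carry an @decorators.idempotent_id):

from tempest.api.volume import base


class VolumeMicroversionSketch(base.BaseVolumeTest):
    """Illustrative only: pin a volume API microversion range."""

    # The class is skipped when this range does not overlap the
    # configured [CONF.volume.min_microversion, max_microversion] range.
    volume_min_microversion = '3.55'
    volume_max_microversion = 'latest'

    def test_call_with_pinned_microversion(self):
        # Every request made through self.volumes_client carries the
        # microversion selected in resource_setup().
        self.volumes_client.list_volumes()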
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index be5638e..5b50bfa 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -25,6 +25,9 @@
 
 
 class VolumesActionsTest(base.BaseVolumeTest):
+    """Test volume actions"""
+
+    create_default_network = True
 
     @classmethod
     def resource_setup(cls):
@@ -37,6 +40,7 @@
     @decorators.attr(type='smoke')
     @utils.services('compute')
     def test_attach_detach_volume_to_instance(self):
+        """Test attaching and detaching volume to instance"""
         # Create a server
         server = self.create_server()
         # Volume is attached and detached successfully from an instance
@@ -52,7 +56,7 @@
 
     @decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
     def test_volume_bootable(self):
-        # Verify that a volume bootable flag is retrieved
+        """Test setting and retrieving bootable flag of a volume"""
         for bool_bootable in [True, False]:
             self.volumes_client.set_bootable_volume(self.volume['id'],
                                                     bootable=bool_bootable)
@@ -68,6 +72,11 @@
     @decorators.idempotent_id('9516a2c8-9135-488c-8dd6-5677a7e5f371')
     @utils.services('compute')
     def test_get_volume_attachment(self):
+        """Test getting volume attachments
+
+        Attach a volume to a server, and then retrieve the volume's
+        attachment information.
+        """
         # Create a server
         server = self.create_server()
         # Verify that a volume's attachment information is retrieved
@@ -83,7 +92,6 @@
                         self.volume['id'], 'available')
         self.addCleanup(self.volumes_client.detach_volume, self.volume['id'])
         volume = self.volumes_client.show_volume(self.volume['id'])['volume']
-        self.assertIn('attachments', volume)
         attachment = volume['attachments'][0]
 
         self.assertEqual('/dev/%s' %
@@ -96,6 +104,7 @@
     @decorators.idempotent_id('d8f1ca95-3d5b-44a3-b8ca-909691c9532d')
     @utils.services('image')
     def test_volume_upload(self):
+        """Test uploading volume to create an image"""
         # NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
         # it is shared with the other tests. After it is uploaded in Glance,
         # there is no way to delete it from Cinder, so we delete it from Glance
@@ -118,6 +127,7 @@
 
     @decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
     def test_reserve_unreserve_volume(self):
+        """Test reserving and unreserving volume"""
         # Mark volume as reserved.
         self.volumes_client.reserve_volume(self.volume['id'])
         # To get the volume info
@@ -131,6 +141,7 @@
 
     @decorators.idempotent_id('fff74e1e-5bd3-4b33-9ea9-24c103bc3f59')
     def test_volume_readonly_update(self):
+        """Test updating and retrieve volume's readonly flag"""
         for readonly in [True, False]:
             # Update volume readonly
             self.volumes_client.update_volume_readonly(self.volume['id'],
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index c178272..138d120 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -27,6 +27,7 @@
 
 
 class VolumesBackupsTest(base.BaseVolumeTest):
+    """Test volumes backup"""
 
     @classmethod
     def skip_checks(cls):
@@ -54,6 +55,16 @@
                       'ceph does not support arbitrary container names')
     @decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
     def test_volume_backup_create_get_detailed_list_restore_delete(self):
+        """Test create/get/list/restore/delete volume backup
+
+        1. Create volume1 with metadata
+        2. Create backup1 from volume1
+        3. Show backup1
+        4. List backups with detail
+        5. Restore backup1
+        6. Verify backup1 has been restored successfully with the metadata
+           of volume1
+        """
         # Create a volume with metadata
         metadata = {"vol-meta1": "value1",
                     "vol-meta2": "value2",
@@ -80,11 +91,7 @@
         self.assertEqual('container', backup['container'])
 
         # Get all backups with detail
-        backups = self.backups_client.list_backups(
-            detail=True)['backups']
-        for backup_info in backups:
-            self.assertIn('created_at', backup_info)
-            self.assertIn('links', backup_info)
+        backups = self.backups_client.list_backups(detail=True)['backups']
         self.assertIn((backup['name'], backup['id']),
                       [(m['name'], m['id']) for m in backups])
 
@@ -93,7 +100,7 @@
         restored_volume_metadata = self.volumes_client.show_volume(
             restored_volume['volume_id'])['volume']['metadata']
 
-        # Verify the backups has been restored successfully
+        # Verify the backup has been restored successfully
         # with the metadata of the source volume.
         self.assertThat(restored_volume_metadata.items(),
                         matchers.ContainsAll(metadata.items()))
@@ -124,6 +131,13 @@
     @decorators.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
     @utils.services('image')
     def test_bootable_volume_backup_and_restore(self):
+        """Test backuping and restoring a bootable volume
+
+        1. Create volume1 from image
+        2. Create backup1 from volume1
+        3. Restore backup1
+        4. Verify the restored backup volume is bootable
+        """
         # Create volume from image
         img_uuid = CONF.compute.image_ref
         volume = self.create_volume(imageRef=img_uuid)
@@ -148,10 +162,10 @@
 
 
 class VolumesBackupsV39Test(base.BaseVolumeTest):
+    """Test volumes backup with volume microversion greater than 3.8"""
 
-    _api_version = 3
-    min_microversion = '3.9'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.9'
+    volume_max_microversion = 'latest'
 
     @classmethod
     def skip_checks(cls):
@@ -161,6 +175,7 @@
 
     @decorators.idempotent_id('9b374cbc-be5f-4d37-8848-7efb8a873dcc')
     def test_update_backup(self):
+        """Test updating backup's name and description"""
         # Create volume and backup
         volume = self.create_volume()
         backup = self.create_backup(volume_id=volume['id'])
@@ -176,7 +191,6 @@
             backup['id'], **update_kwargs)['backup']
         self.assertEqual(backup['id'], update_backup['id'])
         self.assertEqual(update_kwargs['name'], update_backup['name'])
-        self.assertIn('links', update_backup)
 
         # Assert response body for show_backup method
         retrieved_backup = self.backups_client.show_backup(
diff --git a/tempest/api/volume/test_volumes_clone.py b/tempest/api/volume/test_volumes_clone.py
index ea39a21..9ca1c5e 100644
--- a/tempest/api/volume/test_volumes_clone.py
+++ b/tempest/api/volume/test_volumes_clone.py
@@ -23,6 +23,7 @@
 
 
 class VolumesCloneTest(base.BaseVolumeTest):
+    """Test volume clone"""
 
     @classmethod
     def skip_checks(cls):
@@ -44,20 +45,23 @@
 
     @decorators.idempotent_id('9adae371-a257-43a5-9555-dc7c88e66e0e')
     def test_create_from_volume(self):
+        """Test cloning a volume with increasing size"""
         # Creates a volume from another volume passing a size different from
         # the source volume.
         src_size = CONF.volume.volume_size
+        extend_size = CONF.volume.volume_size_extend
 
         src_vol = self.create_volume(size=src_size)
         # Destination volume bigger than source
         dst_vol = self.create_volume(source_volid=src_vol['id'],
-                                     size=src_size + 1)
+                                     size=src_size + extend_size)
 
-        self._verify_volume_clone(src_vol, dst_vol, extra_size=1)
+        self._verify_volume_clone(src_vol, dst_vol, extra_size=extend_size)
 
     @decorators.idempotent_id('cbbcd7c6-5a6c-481a-97ac-ca55ab715d16')
     @utils.services('image')
     def test_create_from_bootable_volume(self):
+        """Test cloning a bootable volume"""
         # Create volume from image
         img_uuid = CONF.compute.image_ref
         src_vol = self.create_volume(imageRef=img_uuid)
diff --git a/tempest/api/volume/test_volumes_clone_negative.py b/tempest/api/volume/test_volumes_clone_negative.py
index bba7a0b..115465c 100644
--- a/tempest/api/volume/test_volumes_clone_negative.py
+++ b/tempest/api/volume/test_volumes_clone_negative.py
@@ -22,6 +22,7 @@
 
 
 class VolumesCloneNegativeTest(base.BaseVolumeTest):
+    """Negative tests of volume clone"""
 
     @classmethod
     def skip_checks(cls):
@@ -32,13 +33,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9adae371-a257-43a5-459a-dc7c88e66e0e')
     def test_create_from_volume_decreasing_size(self):
+        """Test cloning a volume with decreasing size will fail"""
         # Creates a volume from another volume passing a size different from
         # the source volume.
-        src_size = CONF.volume.volume_size + 1
+        src_size = CONF.volume.volume_size + CONF.volume.volume_size_extend
         src_vol = self.create_volume(size=src_size)
 
         # Destination volume smaller than source
         self.assertRaises(exceptions.BadRequest,
                           self.volumes_client.create_volume,
-                          size=src_size - 1,
+                          size=src_size - CONF.volume.volume_size_extend,
                           source_volid=src_vol['id'])
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index c3f44e2..fcbc982 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -28,9 +28,11 @@
 
 
 class VolumesExtendTest(base.BaseVolumeTest):
+    """Test volume extend"""
 
     @decorators.idempotent_id('9a36df71-a257-43a5-9555-dc7c88e66e0e')
     def test_volume_extend(self):
+        """Test extend a volume"""
         # Extend Volume Test.
         volume = self.create_volume(imageRef=self.image_ref)
         extend_size = volume['size'] * 2
@@ -45,6 +47,7 @@
     @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
                           "Cinder volume snapshots are disabled")
     def test_volume_extend_when_volume_has_snapshot(self):
+        """Test extending a volume which has a snapshot"""
         volume = self.create_volume()
         self.create_snapshot(volume['id'])
 
@@ -58,8 +61,9 @@
         self.assertEqual(extend_size, resized_volume['size'])
 
 
-class VolumesExtendAttachedTest(base.BaseVolumeTest):
+class BaseVolumesExtendAttachedTest(base.BaseVolumeTest):
     """Tests extending the size of an attached volume."""
+    create_default_network = True
 
     # We need admin credentials for getting instance action event details. By
     # default a non-admin can list and show instance actions if they own the
@@ -73,11 +77,10 @@
     # details once that microversion is available in Nova.
     credentials = ['primary', 'admin']
 
-    _api_version = 3
     # NOTE(mriedem): The minimum required volume API version is 3.42 and the
     # minimum required compute API microversion is 2.51, but the compute call
     # is implicit - Cinder calls Nova at that microversion, Tempest does not.
-    min_microversion = '3.42'
+    volume_min_microversion = '3.42'
 
     def _find_extend_volume_instance_action(self, server_id):
         actions = self.servers_client.list_instance_actions(
@@ -96,14 +99,9 @@
                     event['finish_time']):
                 return event
 
-    @decorators.idempotent_id('301f5a30-1c6f-4ea0-be1a-91fd28d44354')
-    @testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
-                          "Attached volume extend is disabled.")
-    @utils.services('compute')
-    def test_extend_attached_volume(self):
+    def _test_extend_attached_volume(self, volume):
         """This is a happy path test which does the following:
 
-        * Create a volume at the configured volume_size.
         * Create a server instance.
         * Attach the volume to the server.
         * Wait for the volume status to be "in-use".
@@ -115,8 +113,6 @@
           if we timeout waiting for the instance action event to show up, or
           if the action on the server fails.
         """
-        # Create a test volume. Will be automatically cleaned up on teardown.
-        volume = self.create_volume()
         # Create a test server. Will be automatically cleaned up on teardown.
         server = self.create_server()
         # Attach the volume to the server and wait for the volume status to be
@@ -178,3 +174,14 @@
             "%(request_id)s." %
             {'result': event['result'],
              'request_id': action['request_id']})
+
+
+class VolumesExtendAttachedTest(BaseVolumesExtendAttachedTest):
+
+    @decorators.idempotent_id('301f5a30-1c6f-4ea0-be1a-91fd28d44354')
+    @testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
+                          "Attached volume extend is disabled.")
+    @utils.services('compute')
+    def test_extend_attached_volume(self):
+        volume = self.create_volume()
+        self._test_extend_attached_volume(volume)
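Splitting the test into BaseVolumesExtendAttachedTest plus a thin subclass lets other variants prepare the volume differently and reuse the same happy-path flow. A hypothetical example, not part of this change (a real test would also carry an @decorators.idempotent_id):

# Assumes the names already imported in test_volumes_extend.py:
# testtools, utils, CONF and BaseVolumesExtendAttachedTest.
class VolumesExtendAttachedFromImageSketch(BaseVolumesExtendAttachedTest):
    """Illustrative only: reuse the extend flow with a bootable volume."""

    @testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
                          "Attached volume extend is disabled.")
    @utils.services('compute', 'image')
    def test_extend_attached_volume_from_image(self):
        # Same flow as above, but the volume under test is image-backed.
        volume = self.create_volume(imageRef=CONF.compute.image_ref)
        self._test_extend_attached_volume(volume)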
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 71db95c..2009970 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -27,6 +27,7 @@
 
 
 class VolumesGetTest(base.BaseVolumeTest):
+    """Test getting volume info"""
 
     def _volume_create_get_update_delete(self, **kwargs):
         # Create a volume, Get it's details and Delete the volume
@@ -36,11 +37,9 @@
         kwargs['name'] = v_name
         kwargs['metadata'] = metadata
         volume = self.volumes_client.create_volume(**kwargs)['volume']
-        self.assertIn('id', volume)
         self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'available')
-        self.assertIn('name', volume)
         self.assertEqual(volume['name'], v_name,
                          "The created volume name is not equal "
                          "to the requested name")
@@ -100,7 +99,6 @@
                   'availability_zone': volume['availability_zone'],
                   'size': CONF.volume.volume_size}
         new_volume = self.volumes_client.create_volume(**params)['volume']
-        self.assertIn('id', new_volume)
         self.addCleanup(self.delete_volume, self.volumes_client,
                         new_volume['id'])
         waiters.wait_for_volume_resource_status(self.volumes_client,
@@ -118,12 +116,14 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51')
     def test_volume_create_get_update_delete(self):
+        """Test Create/Get/Update/Delete of a blank volume"""
         self._volume_create_get_update_delete(size=CONF.volume.volume_size)
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
     @utils.services('image')
     def test_volume_create_get_update_delete_from_image(self):
+        """Test Create/Get/Update/Delete of a volume created from image"""
         image = self.images_client.show_image(CONF.compute.image_ref)
         min_disk = image['min_disk']
         disk_size = max(min_disk, CONF.volume.volume_size)
@@ -134,20 +134,20 @@
     @testtools.skipUnless(CONF.volume_feature_enabled.clone,
                           'Cinder volume clones are disabled')
     def test_volume_create_get_update_delete_as_clone(self):
+        """Test Create/Get/Update/Delete of a cloned volume"""
         origin = self.create_volume()
         self._volume_create_get_update_delete(source_volid=origin['id'],
                                               size=CONF.volume.volume_size)
 
 
 class VolumesSummaryTest(base.BaseVolumeTest):
+    """Test volume summary"""
 
-    _api_version = 3
-    min_microversion = '3.12'
-    max_microversion = 'latest'
+    volume_min_microversion = '3.12'
+    volume_max_microversion = 'latest'
 
     @decorators.idempotent_id('c4f2431e-4920-4736-9e00-4040386b6feb')
     def test_show_volume_summary(self):
-        volume_summary = \
-            self.volumes_client.show_volume_summary()['volume-summary']
-        for key in ['total_size', 'total_count']:
-            self.assertIn(key, volume_summary)
+        """Test showing volume summary"""
+        # check response schema
+        self.volumes_client.show_volume_summary()
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 2345698..1d1981c 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -16,8 +16,8 @@
 
 import operator
 import random
+from urllib.parse import urlparse
 
-from six.moves.urllib.parse import urlparse
 from testtools import matchers
 
 from tempest.api.volume import base
@@ -26,11 +26,14 @@
 
 
 class VolumesListTestJSON(base.BaseVolumeTest):
-    # NOTE: This test creates a number of 1G volumes. To run it successfully,
-    # ensure that the backing file for the volume group that Cinder uses
-    # has space for at least 3 1G volumes!
-    # If you are running a Devstack environment, ensure that the
-    # VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
+    """Test listing volumes
+
+    NOTE: This test creates a number of 1G volumes. To run it successfully,
+    ensure that the backing file for the volume group that Cinder uses
+    has space for at least 3 1G volumes!
+    If you are running a Devstack environment, ensure that the
+    VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
+    """
 
     VOLUME_FIELDS = ('id', 'name')
 
@@ -116,7 +119,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('0b6ddd39-b948-471f-8038-4787978747c4')
     def test_volume_list(self):
-        # Get a list of Volumes
+        """Test getting a list of volumes"""
         # Fetch all volumes
         fetched_list = self.volumes_client.list_volumes()['volumes']
         self._assert_volumes_in(fetched_list, self.volume_list,
@@ -124,13 +127,14 @@
 
     @decorators.idempotent_id('adcbb5a7-5ad8-4b61-bd10-5380e111a877')
     def test_volume_list_with_details(self):
-        # Get a list of Volumes with details
+        """Test getting a list of detailed volumes"""
         # Fetch all Volumes
         fetched_list = self.volumes_client.list_volumes(detail=True)['volumes']
         self._assert_volumes_in(fetched_list, self.volume_list)
 
     @decorators.idempotent_id('a28e8da4-0b56-472f-87a8-0f4d3f819c02')
     def test_volume_list_by_name(self):
+        """Test getting a list of volumes filtered by volume name"""
         volume = self.volume_list[data_utils.rand_int_id(0, 2)]
         params = {'name': volume['name']}
         fetched_vol = self.volumes_client.list_volumes(
@@ -140,6 +144,7 @@
 
     @decorators.idempotent_id('2de3a6d4-12aa-403b-a8f2-fdeb42a89623')
     def test_volume_list_details_by_name(self):
+        """Test getting a list of detailed volumes filtered by volume name"""
         volume = self.volume_list[data_utils.rand_int_id(0, 2)]
         params = {'name': volume['name']}
         fetched_vol = self.volumes_client.list_volumes(
@@ -149,6 +154,7 @@
 
     @decorators.idempotent_id('39654e13-734c-4dab-95ce-7613bf8407ce')
     def test_volumes_list_by_status(self):
+        """Test getting a list of volumes filtered by volume status"""
         params = {'status': 'available'}
         fetched_list = self.volumes_client.list_volumes(
             params=params)['volumes']
@@ -158,6 +164,7 @@
 
     @decorators.idempotent_id('2943f712-71ec-482a-bf49-d5ca06216b9f')
     def test_volumes_list_details_by_status(self):
+        """Test getting a list of detailed volumes filtered by status"""
         params = {'status': 'available'}
         fetched_list = self.volumes_client.list_volumes(
             detail=True, params=params)['volumes']
@@ -181,6 +188,7 @@
 
     @decorators.idempotent_id('2016a939-72ec-482a-bf49-d5ca06216b9f')
     def test_volumes_list_details_by_bootable(self):
+        """Test getting a list of detailed volumes filtered by bootable"""
         params = {'bootable': 'false'}
         fetched_list = self.volumes_client.list_volumes(
             detail=True, params=params)['volumes']
@@ -190,6 +198,7 @@
 
     @decorators.idempotent_id('c0cfa863-3020-40d7-b587-e35f597d5d87')
     def test_volumes_list_by_availability_zone(self):
+        """Test getting a list of volumes filtered by availability zone"""
         volume = self.volume_list[data_utils.rand_int_id(0, 2)]
         zone = volume['availability_zone']
         params = {'availability_zone': zone}
@@ -201,6 +210,7 @@
 
     @decorators.idempotent_id('e1b80d13-94f0-4ba2-a40e-386af29f8db1')
     def test_volumes_list_details_by_availability_zone(self):
+        """Test getting a list of detailed volumes by availability zone"""
         volume = self.volume_list[data_utils.rand_int_id(0, 2)]
         zone = volume['availability_zone']
         params = {'availability_zone': zone}
@@ -212,19 +222,19 @@
 
     @decorators.idempotent_id('b5ebea1b-0603-40a0-bb41-15fcd0a53214')
     def test_volume_list_with_param_metadata(self):
-        # Test to list volumes when metadata param is given
+        """Test listing volumes when metadata param is given"""
         params = {'metadata': self.metadata}
         self._list_by_param_value_and_assert(params)
 
     @decorators.idempotent_id('1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d')
     def test_volume_list_with_detail_param_metadata(self):
-        # Test to list volumes details when metadata param is given
+        """Test listing volumes details when metadata param is given"""
         params = {'metadata': self.metadata}
         self._list_by_param_value_and_assert(params, with_detail=True)
 
     @decorators.idempotent_id('777c87c1-2fc4-4883-8b8e-5c0b951d1ec8')
     def test_volume_list_param_display_name_and_status(self):
-        # Test to list volume when display name and status param is given
+        """Test listing volume when display name and status param is given"""
         volume = self.volume_list[data_utils.rand_int_id(0, 2)]
         params = {'name': volume['name'],
                   'status': 'available'}
@@ -232,7 +242,7 @@
 
     @decorators.idempotent_id('856ab8ca-6009-4c37-b691-be1065528ad4')
     def test_volume_list_with_detail_param_display_name_and_status(self):
-        # Test to list volume when name and status param is given
+        """Test listing volume when name and status param is given"""
         volume = self.volume_list[data_utils.rand_int_id(0, 2)]
         params = {'name': volume['name'],
                   'status': 'available'}
@@ -240,7 +250,7 @@
 
     @decorators.idempotent_id('2a7064eb-b9c3-429b-b888-33928fc5edd3')
     def test_volume_list_details_with_multiple_params(self):
-        # List volumes detail using combined condition
+        """Test listing volumes detail using combined filtering condition"""
         def _list_details_with_multiple_params(limit=2,
                                                status='available',
                                                sort_dir='asc',
@@ -375,14 +385,29 @@
 
     @decorators.idempotent_id('e9138a2c-f67b-4796-8efa-635c196d01de')
     def test_volume_list_details_pagination(self):
+        """Test listing volumes with details by pagination
+
+        All volumes will be returned by multiple requests, and the number of
+        'limit' volumes will be returned at a time.
+        """
         self._test_pagination('volumes', ids=self.volume_id_list, detail=True)
 
     @decorators.idempotent_id('af55e775-8e4b-4feb-8719-215c43b0238c')
     def test_volume_list_pagination(self):
+        """Test listing volumes by pagination
+
+        All volumes will be returned by multiple requests, and the number of
+        'limit' volumes will be returned at a time.
+        """
         self._test_pagination('volumes', ids=self.volume_id_list, detail=False)
 
     @decorators.idempotent_id('46eff077-100b-427f-914e-3db2abcdb7e2')
     def test_volume_list_with_detail_param_marker(self):
+        """Test listing volumes with details from the specified marker
+
+        Choose one volume id as the marker and list volumes with it; only
+        volumes with ids greater than the marker should be returned.
+        """
         # Choosing a random volume from a list of volumes for 'marker'
         # parameter
         marker = random.choice(self.volume_id_list)
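
The pagination docstrings above describe the standard Block Storage paging
contract: 'limit' caps the page size and 'marker' (a volume id) continues the
listing after a known entry. A rough client-side sketch of walking every page
with the same volumes client (the page size and helper name are illustrative,
not part of the test suite):

    def iter_all_volumes(volumes_client, page_size=2):
        marker = None
        while True:
            params = {'limit': page_size}
            if marker is not None:
                params['marker'] = marker
            page = volumes_client.list_volumes(params=params)['volumes']
            if not page:
                return
            for volume in page:
                yield volume
            # continue the listing after the last volume of this page
            marker = page[-1]['id']
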
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 866bd87..d9b8430 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.api.volume import base
 from tempest.common import utils
@@ -28,6 +28,7 @@
 
 
 class VolumesNegativeTest(base.BaseVolumeTest):
+    """Negative tests of volumes"""
 
     @classmethod
     def resource_setup(cls):
@@ -44,12 +45,12 @@
             container_format=CONF.image.container_formats[0],
             disk_format=CONF.image.disk_formats[0],
             visibility='private',
-            min_disk=CONF.volume.volume_size + 1)
+            min_disk=CONF.volume.volume_size + CONF.volume.volume_size_extend)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.images_client.delete_image, image['id'])
 
         # Upload image with 1KB data
-        image_file = six.BytesIO(data_utils.random_bytes())
+        image_file = io.BytesIO(data_utils.random_bytes())
         self.images_client.store_image_file(image['id'], image_file)
         waiters.wait_for_image_status(self.images_client,
                                       image['id'], 'active')
@@ -58,50 +59,49 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f131c586-9448-44a4-a8b0-54ca838aa43e')
     def test_volume_get_nonexistent_volume_id(self):
-        # Should not be able to get a non-existent volume
+        """Test getting non existent volume should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.show_volume,
                           data_utils.rand_uuid())
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('555efa6e-efcd-44ef-8a3b-4a7ca4837a29')
     def test_volume_delete_nonexistent_volume_id(self):
-        # Should not be able to delete a non-existent Volume
+        """Test deleting non existent volume should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.delete_volume,
                           data_utils.rand_uuid())
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1ed83a8a-682d-4dfb-a30e-ee63ffd6c049')
     def test_create_volume_with_invalid_size(self):
-        # Should not be able to create volume with invalid size in request
+        """Test creating volume with invalid size should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.volumes_client.create_volume, size='#$%')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9387686f-334f-4d31-a439-33494b9e2683')
     def test_create_volume_without_passing_size(self):
-        # Should not be able to create volume without passing size
-        # in request
+        """Test creating volume with empty size should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.volumes_client.create_volume, size='')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('41331caa-eaf4-4001-869d-bc18c1869360')
     def test_create_volume_with_size_zero(self):
-        # Should not be able to create volume with size zero
+        """Test creating volume with zero size should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.volumes_client.create_volume, size='0')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8b472729-9eba-446e-a83b-916bdb34bef7')
     def test_create_volume_with_size_negative(self):
-        # Should not be able to create volume with size negative
+        """Test creating volume with negative size should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.volumes_client.create_volume, size='-1')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('10254ed8-3849-454e-862e-3ab8e6aa01d2')
     def test_create_volume_with_nonexistent_volume_type(self):
-        # Should not be able to create volume with non-existent volume type
+        """Test creating volume with non existent volume type should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
                           size=CONF.volume.volume_size,
                           volume_type=data_utils.rand_uuid())
@@ -109,7 +109,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0c36f6ae-4604-4017-b0a9-34fdc63096f9')
     def test_create_volume_with_nonexistent_snapshot_id(self):
-        # Should not be able to create volume with non-existent snapshot
+        """Test creating volume with non existent snapshot should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
                           size=CONF.volume.volume_size,
                           snapshot_id=data_utils.rand_uuid())
@@ -117,7 +117,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('47c73e08-4be8-45bb-bfdf-0c4e79b88344')
     def test_create_volume_with_nonexistent_source_volid(self):
-        # Should not be able to create volume with non-existent source volume
+        """Test creating volume with non existent source volume should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
                           size=CONF.volume.volume_size,
                           source_volid=data_utils.rand_uuid())
@@ -125,46 +125,49 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0186422c-999a-480e-a026-6a665744c30c')
     def test_update_volume_with_nonexistent_volume_id(self):
+        """Test updating non existent volume should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
-                          volume_id=data_utils.rand_uuid())
+                          volume_id=data_utils.rand_uuid(), name="n")
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e66e40d6-65e6-4e75-bdc7-636792fa152d')
     def test_update_volume_with_invalid_volume_id(self):
+        """Test updating volume with invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
-                          volume_id=data_utils.rand_name('invalid'))
+                          volume_id=data_utils.rand_name('invalid'), name="n")
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('72aeca85-57a5-4c1f-9057-f320f9ea575b')
     def test_update_volume_with_empty_volume_id(self):
+        """Test updating volume with empty volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
                           volume_id='')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('30799cfd-7ee4-446c-b66c-45b383ed211b')
     def test_get_invalid_volume_id(self):
-        # Should not be able to get volume with invalid id
+        """Test getting volume with invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.show_volume,
                           data_utils.rand_name('invalid'))
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c6c3db06-29ad-4e91-beb0-2ab195fe49e3')
     def test_get_volume_without_passing_volume_id(self):
-        # Should not be able to get volume when empty ID is passed
+        """Test getting volume with empty volume id should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.volumes_client.show_volume, '')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1f035827-7c32-4019-9240-b4ec2dbd9dfd')
     def test_delete_invalid_volume_id(self):
-        # Should not be able to delete volume when invalid ID is passed
+        """Test deleting volume with invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.delete_volume,
                           data_utils.rand_name('invalid'))
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('441a1550-5d44-4b30-af0f-a6d402f52026')
     def test_delete_volume_without_passing_volume_id(self):
-        # Should not be able to delete volume when empty ID is passed
+        """Test deleting volume with empty volume id should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.volumes_client.delete_volume, '')
 
@@ -172,6 +175,7 @@
     @decorators.idempotent_id('f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6')
     @utils.services('compute')
     def test_attach_volumes_with_nonexistent_volume_id(self):
+        """Test attaching non existent volume to server should fail"""
         server = self.create_server()
 
         self.assertRaises(lib_exc.NotFound,
@@ -183,6 +187,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
     def test_detach_volumes_with_invalid_volume_id(self):
+        """Test detaching volume with invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.volumes_client.detach_volume,
                           'xxx')
@@ -190,7 +195,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e0c75c74-ee34-41a9-9288-2a2051452854')
     def test_volume_extend_with_size_smaller_than_original_size(self):
-        # Extend volume with smaller size than original size.
+        """Test extending volume with decreasing size should fail"""
         extend_size = 0
         self.assertRaises(lib_exc.BadRequest,
                           self.volumes_client.extend_volume,
@@ -199,7 +204,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5d0b480d-e833-439f-8a5a-96ad2ed6f22f')
     def test_volume_extend_with_non_number_size(self):
-        # Extend volume when size is non number.
+        """Test extending volume with non-integer size should fail"""
         extend_size = 'abc'
         self.assertRaises(lib_exc.BadRequest,
                           self.volumes_client.extend_volume,
@@ -208,7 +213,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('355218f1-8991-400a-a6bb-971239287d92')
     def test_volume_extend_with_None_size(self):
-        # Extend volume with None size.
+        """Test extending volume with none size should fail"""
         extend_size = None
         self.assertRaises(lib_exc.BadRequest,
                           self.volumes_client.extend_volume,
@@ -217,22 +222,23 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
     def test_volume_extend_with_nonexistent_volume_id(self):
-        # Extend volume size when volume is nonexistent.
-        extend_size = self.volume['size'] + 1
+        """Test extending non existent volume should fail"""
+        extend_size = self.volume['size'] + CONF.volume.volume_size_extend
         self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
                           data_utils.rand_uuid(), new_size=extend_size)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
     def test_volume_extend_without_passing_volume_id(self):
-        # Extend volume size when passing volume id is None.
-        extend_size = self.volume['size'] + 1
+        """Test extending volume without passing volume id should fail"""
+        extend_size = self.volume['size'] + CONF.volume.volume_size_extend
         self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
                           None, new_size=extend_size)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ac6084c0-0546-45f9-b284-38a367e0e0e2')
     def test_reserve_volume_with_nonexistent_volume_id(self):
+        """Test reserving non existent volume should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.volumes_client.reserve_volume,
                           data_utils.rand_uuid())
@@ -240,6 +246,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('eb467654-3dc1-4a72-9b46-47c29d22654c')
     def test_unreserve_volume_with_nonexistent_volume_id(self):
+        """Test unreserving non existent volume should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.volumes_client.unreserve_volume,
                           data_utils.rand_uuid())
@@ -247,6 +254,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('449c4ed2-ecdd-47bb-98dc-072aeccf158c')
     def test_reserve_volume_with_negative_volume_status(self):
+        """Test reserving already reserved volume should fail"""
         # Mark volume as reserved.
         self.volumes_client.reserve_volume(self.volume['id'])
         # Mark volume which is marked as reserved before
@@ -259,6 +267,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f')
     def test_list_volumes_with_nonexistent_name(self):
+        """Test listing volumes with non existent name should get nothing"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         params = {'name': v_name}
         fetched_volume = self.volumes_client.list_volumes(
@@ -268,6 +277,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9ca17820-a0e7-4cbd-a7fa-f4468735e359')
     def test_list_volumes_detail_with_nonexistent_name(self):
+        """Test listing volume details with non existent name
+
+        Listing volume details with non existent name should get nothing.
+        """
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         params = {'name': v_name}
         fetched_volume = \
@@ -278,6 +291,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('143b279b-7522-466b-81be-34a87d564a7c')
     def test_list_volumes_with_invalid_status(self):
+        """Test listing volumes with invalid status should get nothing"""
         params = {'status': 'null'}
         fetched_volume = self.volumes_client.list_volumes(
             params=params)['volumes']
@@ -286,6 +300,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ba94b27b-be3f-496c-a00e-0283b373fa75')
     def test_list_volumes_detail_with_invalid_status(self):
+        """Test listing volume details with invalid status
+
+        Listing volume details with invalid status should get nothing.
+        """
         params = {'status': 'null'}
         fetched_volume = \
             self.volumes_client.list_volumes(detail=True,
@@ -296,6 +314,7 @@
     @decorators.idempotent_id('5b810c91-0ad1-47ce-aee8-615f789be78f')
     @utils.services('image')
     def test_create_volume_from_image_with_decreasing_size(self):
+        """Test creating volume from image with decreasing size should fail"""
         # Create image
         image = self.create_image()
 
@@ -311,11 +330,15 @@
     @decorators.idempotent_id('d15e7f35-2cfc-48c8-9418-c8223a89bcbb')
     @utils.services('image')
     def test_create_volume_from_deactivated_image(self):
+        """Test creating volume from deactivated image should fail"""
         # Create image
         image = self.create_image()
 
         # Deactivate the image
         self.images_client.deactivate_image(image['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.images_client.reactivate_image, image['id'])
+
         body = self.images_client.show_image(image['id'])
         self.assertEqual("deactivated", body['status'])
         # Try creating a volume from deactivated image
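
The switch from six.BytesIO to io.BytesIO is a straight Python 3 cleanup: both
return an in-memory binary file object that store_image_file can stream from.
A tiny standalone illustration (the payload size is arbitrary):

    import io
    import os

    payload = os.urandom(1024)
    image_file = io.BytesIO(payload)     # behaves like an open binary file
    assert image_file.read() == payload  # readable exactly like a file on disk
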
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 72e7290..a58da7e 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -25,6 +25,9 @@
 
 
 class VolumesSnapshotTestJSON(base.BaseVolumeTest):
+    """Test volume snapshots"""
+
+    create_default_network = True
 
     @classmethod
     def skip_checks(cls):
@@ -40,6 +43,7 @@
     @decorators.idempotent_id('8567b54c-4455-446d-a1cf-651ddeaa3ff2')
     @utils.services('compute')
     def test_snapshot_create_delete_with_volume_in_use(self):
+        """Test create/delete snapshot from volume attached to server"""
         # Create a test instance
         server = self.create_server()
         # NOTE(zhufl) Here we create volume from self.image_ref for adding
@@ -65,7 +69,13 @@
     @decorators.idempotent_id('5210a1de-85a0-11e6-bb21-641c676a5d61')
     @utils.services('compute')
     def test_snapshot_create_offline_delete_online(self):
+        """Test creating snapshots when volume is detached and attached
 
+        1. Create snapshot1 from volume1(not attached to any server)
+        2. Attach volume1 to server1
+        3. Create snapshot2 and snapshot3 from volume1
+        4. Delete snapshot3, snapshot1, snapshot2
+        """
         # Create a snapshot while it is not attached
         snapshot1 = self.create_snapshot(self.volume_origin['id'])
 
@@ -73,7 +83,7 @@
         server = self.create_server()
         self.attach_volume(server['id'], self.volume_origin['id'])
 
-        # Now that the volume is attached, create another snapshots
+        # Now that the volume is attached, create more snapshots
         snapshot2 = self.create_snapshot(self.volume_origin['id'], force=True)
         snapshot3 = self.create_snapshot(self.volume_origin['id'], force=True)
 
@@ -85,6 +95,7 @@
 
     @decorators.idempotent_id('2a8abbe4-d871-46db-b049-c41f5af8216e')
     def test_snapshot_create_get_list_update_delete(self):
+        """Test create/get/list/update/delete snapshot"""
         # Create a snapshot with metadata
         metadata = {"snap-meta1": "value1",
                     "snap-meta2": "value2",
@@ -155,19 +166,26 @@
 
     @decorators.idempotent_id('677863d1-3142-456d-b6ac-9924f667a7f4')
     def test_volume_from_snapshot(self):
-        # Creates a volume from a snapshot passing a size
-        # different from the source
-        self._create_volume_from_snapshot(extra_size=1)
+        """Test creating volume from snapshot with extending size"""
+        self._create_volume_from_snapshot(
+            extra_size=CONF.volume.volume_size_extend)
 
     @decorators.idempotent_id('053d8870-8282-4fff-9dbb-99cb58bb5e0a')
     def test_volume_from_snapshot_no_size(self):
-        # Creates a volume from a snapshot defaulting to original size
+        """Test creating volume from snapshot with original size"""
         self._create_volume_from_snapshot()
 
     @decorators.idempotent_id('bbcfa285-af7f-479e-8c1a-8c34fc16543c')
     @testtools.skipUnless(CONF.volume_feature_enabled.backup,
                           "Cinder backup is disabled")
     def test_snapshot_backup(self):
+        """Test creating backup from snapshot and volume
+
+        1. Create snapshot1 from volume1
+        2. Create backup from volume1 and snapshot1
+        3. Check the created backup's volume is volume1 and snapshot
+           is snapshot1
+        """
         # Create a snapshot
         snapshot = self.create_snapshot(volume_id=self.volume_origin['id'])
 
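
The offline/online snapshot test above hinges on Cinder's 'force' flag: once a
volume is attached, a plain snapshot request is normally rejected, so the
in-use snapshots are created with force=True. A condensed sketch of that
ordering, using the helpers already visible in this file (the volume and
server objects are assumed to exist):

    snap_offline = self.create_snapshot(volume['id'])            # volume still 'available'
    self.attach_volume(server['id'], volume['id'])                # volume now 'in-use'
    snap_online = self.create_snapshot(volume['id'], force=True)  # force required while attached
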
diff --git a/tempest/api/volume/test_volumes_snapshots_list.py b/tempest/api/volume/test_volumes_snapshots_list.py
index 8a416ea..77627bc 100644
--- a/tempest/api/volume/test_volumes_snapshots_list.py
+++ b/tempest/api/volume/test_volumes_snapshots_list.py
@@ -18,6 +18,7 @@
 
 
 class VolumesSnapshotListTestJSON(base.BaseVolumeTest):
+    """Test listing volume snapshots"""
 
     @classmethod
     def skip_checks(cls):
@@ -50,6 +51,7 @@
 
     def _list_snapshots_by_param_limit(self, limit, expected_elements):
         """list snapshots by limit param"""
+
         # Get snapshots list using limit parameter
         fetched_snap_list = self.snapshots_client.list_snapshots(
             limit=limit)['snapshots']
@@ -58,7 +60,8 @@
 
     @decorators.idempotent_id('59f41f43-aebf-48a9-ab5d-d76340fab32b')
     def test_snapshots_list_with_params(self):
-        """list snapshots with params."""
+        """Test listing snapshots with params"""
+
         # Verify list snapshots by display_name filter
         params = {'name': self.snapshot['name']}
         self._list_by_param_values_and_assert(**params)
@@ -74,7 +77,8 @@
 
     @decorators.idempotent_id('220a1022-1fcd-4a74-a7bd-6b859156cda2')
     def test_snapshots_list_details_with_params(self):
-        """list snapshot details with params."""
+        """Test listing snapshot details with params"""
+
         # Verify list snapshot details by display_name filter
         params = {'name': self.snapshot['name']}
         self._list_by_param_values_and_assert(with_detail=True, **params)
@@ -88,28 +92,33 @@
 
     @decorators.idempotent_id('db4d8e0a-7a2e-41cc-a712-961f6844e896')
     def test_snapshot_list_param_limit(self):
-        # List returns limited elements
+        """Test listing snapshot with limit returns the limited elements
+
+        If listing snapshots with limit=1, then 1 snapshot is returned.
+        """
         self._list_snapshots_by_param_limit(limit=1, expected_elements=1)
 
     @decorators.idempotent_id('a1427f61-420e-48a5-b6e3-0b394fa95400')
     def test_snapshot_list_param_limit_equals_infinite(self):
-        # List returns all elements when request limit exceeded
-        # snapshots number
+        """Test listing snapshot with infinite limit
+
+        If listing snapshots with limit greater than the count of all
+        snapshots, then all snapshots are returned.
+        """
         snap_list = self.snapshots_client.list_snapshots()['snapshots']
         self._list_snapshots_by_param_limit(limit=100000,
                                             expected_elements=len(snap_list))
 
     @decorators.idempotent_id('e3b44b7f-ae87-45b5-8a8c-66110eb24d0a')
     def test_snapshot_list_param_limit_equals_zero(self):
-        # List returns zero elements
+        """Test listing snapshot with zero limit should return empty list"""
         self._list_snapshots_by_param_limit(limit=0, expected_elements=0)
 
     def _list_snapshots_param_sort(self, sort_key, sort_dir):
-        """list snapshots by sort param"""
         snap_list = self.snapshots_client.list_snapshots(
             sort_key=sort_key, sort_dir=sort_dir)['snapshots']
         self.assertNotEmpty(snap_list)
-        if sort_key is 'display_name':
+        if sort_key == 'display_name':
             sort_key = 'name'
         # Note: On Cinder API, 'display_name' works as a sort key
         # on a request, a volume name appears as 'name' on the response.
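
The 'is' to '==' change above matters because 'is' compares object identity,
not string value; the old comparison only appeared to work because CPython
happens to intern some short literals, and recent Python versions emit a
SyntaxWarning for 'is' with a literal. A minimal illustration:

    a = 'display_name'
    b = ''.join(['display_', 'name'])   # equal value, built at runtime
    assert a == b                       # value comparison: always correct
    # whether 'a is b' holds depends on interning details, so never rely on it
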
@@ -122,33 +131,42 @@
 
     @decorators.idempotent_id('c5513ada-64c1-4d28-83b9-af3307ec1388')
     def test_snapshot_list_param_sort_id_asc(self):
+        """Test listing snapshots sort by id ascendingly"""
         self._list_snapshots_param_sort(sort_key='id', sort_dir='asc')
 
     @decorators.idempotent_id('8a7fe058-0b41-402a-8afd-2dbc5a4a718b')
     def test_snapshot_list_param_sort_id_desc(self):
+        """Test listing snapshots sort by id descendingly"""
         self._list_snapshots_param_sort(sort_key='id', sort_dir='desc')
 
     @decorators.idempotent_id('4052c3a0-2415-440a-a8cc-305a875331b0')
     def test_snapshot_list_param_sort_created_at_asc(self):
+        """Test listing snapshots sort by created_at ascendingly"""
         self._list_snapshots_param_sort(sort_key='created_at', sort_dir='asc')
 
     @decorators.idempotent_id('dcbbe24a-f3c0-4ec8-9274-55d48db8d1cf')
     def test_snapshot_list_param_sort_created_at_desc(self):
+        """Test listing snapshots sort by created_at descendingly"""
         self._list_snapshots_param_sort(sort_key='created_at', sort_dir='desc')
 
     @decorators.idempotent_id('d58b5fed-0c37-42d3-8c5d-39014ac13c00')
     def test_snapshot_list_param_sort_name_asc(self):
+        """Test listing snapshots sort by display_name ascendingly"""
         self._list_snapshots_param_sort(sort_key='display_name',
                                         sort_dir='asc')
 
     @decorators.idempotent_id('96ba6f4d-1f18-47e1-b4bc-76edc6c21250')
     def test_snapshot_list_param_sort_name_desc(self):
+        """Test listing snapshots sort by display_name descendingly"""
         self._list_snapshots_param_sort(sort_key='display_name',
                                         sort_dir='desc')
 
     @decorators.idempotent_id('05489dde-44bc-4961-a1f5-3ce7ee7824f7')
     def test_snapshot_list_param_marker(self):
-        # The list of snapshots should end before the provided marker
+        """Test listing snapshots with marker
+
+        The list of snapshots should end before the provided marker
+        """
         snap_list = self.snapshots_client.list_snapshots()['snapshots']
         # list_snapshots will take the reverse order as they are created.
         snapshot_id_list = [snap['id'] for snap in snap_list][::-1]
@@ -163,6 +181,13 @@
 
     @decorators.idempotent_id('ca96d551-17c6-4e11-b0e8-52d3bb8a63c7')
     def test_snapshot_list_param_offset(self):
+        """Test listing snapshots with offset and limit
+
+        If listing snapshots with offset=2 and limit=3, then at most 3 (the
+        limit) snapshots, starting at position 2 (the offset) of the full
+        snapshot list, should be returned.
+        (Positions in the full snapshot list start from 0.)
+        """
         params = {'offset': 2, 'limit': 3}
         snap_list = self.snapshots_client.list_snapshots(**params)['snapshots']
         # Verify the list of snapshots skip offset=2 from the first element
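
The offset/limit description above boils down to ordinary list slicing: with
the full listing kept in the same order, 'offset' skips that many leading
snapshots and 'limit' caps how many follow. A toy illustration of the expected
relationship (the ids are made up):

    all_snaps = ['s0', 's1', 's2', 's3', 's4', 's5']
    offset, limit = 2, 3
    expected_page = all_snaps[offset:offset + limit]
    assert expected_page == ['s2', 's3', 's4']
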
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 0453c0a..9c36dc6 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -20,6 +20,7 @@
 
 
 class VolumesSnapshotNegativeTestJSON(base.BaseVolumeTest):
+    """Negative tests of volume snapshot"""
 
     @classmethod
     def skip_checks(cls):
@@ -30,7 +31,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e3e466af-70ab-4f4b-a967-ab04e3532ea7')
     def test_create_snapshot_with_nonexistent_volume_id(self):
-        # Create a snapshot with nonexistent volume id
+        """Test creating snapshot from non existent volume should fail"""
         s_name = data_utils.rand_name(self.__class__.__name__ + '-snap')
         self.assertRaises(lib_exc.NotFound,
                           self.snapshots_client.create_snapshot,
@@ -40,6 +41,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('bb9da53e-d335-4309-9c15-7e76fd5e4d6d')
     def test_create_snapshot_without_passing_volume_id(self):
+        """Test creating snapshot without passing volume_id should fail"""
         # Create a snapshot without passing volume id
         s_name = data_utils.rand_name(self.__class__.__name__ + '-snap')
         self.assertRaises(lib_exc.NotFound,
@@ -49,6 +51,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('677863d1-34f9-456d-b6ac-9924f667a7f4')
     def test_volume_from_snapshot_decreasing_size(self):
+        """Test creating volume from snapshot with decreasing size
+
+        Creating volume from snapshot with decreasing size should fail.
+        """
         # Creates a volume a snapshot passing a size different from the source
         src_size = CONF.volume.volume_size * 2
 
@@ -64,6 +70,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8fd92339-e22f-4591-86b4-1e2215372a40')
     def test_list_snapshot_invalid_param_limit(self):
+        """Test listing snapshots with invalid limit param should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.snapshots_client.list_snapshots,
                           limit='invalid')
@@ -71,6 +78,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('27b5f37f-bf69-4e8c-986e-c44f3d6819b8')
     def test_list_snapshots_invalid_param_sort(self):
+        """Test listing snapshots with invalid sort key should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.snapshots_client.list_snapshots,
                           sort_key='invalid')
@@ -78,6 +86,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b68deeda-ca79-4a32-81af-5c51179e553a')
     def test_list_snapshots_invalid_param_marker(self):
+        """Test listing snapshots with invalid marker should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.snapshots_client.list_snapshots,
                           marker=data_utils.rand_uuid())
diff --git a/tempest/clients.py b/tempest/clients.py
index 6aed92e..3d799c5 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -44,7 +44,7 @@
         self._set_object_storage_clients()
         self._set_image_clients()
         self._set_network_clients()
-        self.placement_client = self.placement.PlacementClient()
+        self._set_placement_clients()
         # TODO(andreaf) This is maintained for backward compatibility
         # with plugins, but it should removed eventually, since it was
         # never a stable interface and it's not useful anyways
@@ -59,6 +59,8 @@
         self.ports_client = self.network.PortsClient()
         self.network_quotas_client = self.network.QuotasClient()
         self.floating_ips_client = self.network.FloatingIPsClient()
+        self.floating_ips_port_forwarding_client =\
+            self.network.FloatingIpsPortForwardingClient()
         self.metering_labels_client = self.network.MeteringLabelsClient()
         self.metering_label_rules_client = (
             self.network.MeteringLabelRulesClient())
@@ -71,7 +73,11 @@
         self.tags_client = self.network.TagsClient()
         self.qos_client = self.network.QosClient()
         self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
+        self.qos_limit_bw_client = self.network.QosLimitBandwidthRulesClient()
         self.segments_client = self.network.SegmentsClient()
+        self.trunks_client = self.network.TrunksClient()
+        self.log_resource_client = self.network.LogResourceClient()
+        self.loggable_resource_client = self.network.LoggableResourceClient()
 
     def _set_image_clients(self):
         if CONF.service_available.glance:
@@ -88,6 +94,14 @@
                 self.image_v2.NamespacePropertiesClient()
             self.namespace_tags_client = self.image_v2.NamespaceTagsClient()
             self.image_versions_client = self.image_v2.VersionsClient()
+            # NOTE(danms): If no alternate endpoint is configured,
+            # this client will work the same as the base self.images_client.
+            # If your test needs to know if these are different, check the
+            # config option to see if the alternate_image_endpoint is set.
+            self.image_client_remote = self.image_v2.ImagesClient(
+                service=CONF.image.alternate_image_endpoint,
+                endpoint_type=CONF.image.alternate_image_endpoint_type,
+                region=CONF.image.region)
 
     def _set_compute_clients(self):
         self.agents_client = self.compute.AgentsClient()
@@ -125,6 +139,8 @@
         self.instance_usages_audit_log_client = (
             self.compute.InstanceUsagesAuditLogClient())
         self.tenant_networks_client = self.compute.TenantNetworksClient()
+        self.assisted_volume_snapshots_client = (
+            self.compute.AssistedVolumeSnapshotsClient())
 
         # NOTE: The following client needs special timeout values because
         # the API is a proxy for the other component.
@@ -139,6 +155,11 @@
         self.snapshots_extensions_client = self.compute.SnapshotsClient(
             **params_volume)
 
+    def _set_placement_clients(self):
+        self.placement_client = self.placement.PlacementClient()
+        self.resource_providers_client = \
+            self.placement.ResourceProvidersClient()
+
     def _set_identity_clients(self):
         # Clients below use the admin endpoint type of Keystone API v2
         params_v2_admin = {
@@ -203,6 +224,10 @@
             **params_v3)
         self.application_credentials_client = \
             self.identity_v3.ApplicationCredentialsClient(**params_v3)
+        self.access_rules_client = \
+            self.identity_v3.AccessRulesClient(**params_v3)
+        self.identity_limits_client = \
+            self.identity_v3.LimitsClient(**params_v3)
 
         # Token clients do not use the catalog. They only need default_params.
         # They read auth_url, so they should only be set if the corresponding
@@ -224,87 +249,87 @@
 
     def _set_volume_clients(self):
 
-        # if only api_v3 is enabled, all these clients should be available
-        if (CONF.volume_feature_enabled.api_v2 or
-            CONF.volume_feature_enabled.api_v3):
-            self.backups_client_latest = self.volume_v3.BackupsClient()
-            self.encryption_types_client_latest = \
-                self.volume_v3.EncryptionTypesClient()
-            self.snapshot_manage_client_latest = \
-                self.volume_v3.SnapshotManageClient()
-            self.snapshots_client_latest = self.volume_v3.SnapshotsClient()
-            self.volume_capabilities_client_latest = \
-                self.volume_v3.CapabilitiesClient()
-            self.volume_manage_client_latest = (
-                self.volume_v3.VolumeManageClient())
-            self.volume_qos_client_latest = self.volume_v3.QosSpecsClient()
-            self.volume_services_client_latest = (
-                self.volume_v3.ServicesClient())
-            self.volume_types_client_latest = self.volume_v3.TypesClient()
-            self.volume_hosts_client_latest = self.volume_v3.HostsClient()
-            self.volume_quotas_client_latest = self.volume_v3.QuotasClient()
-            self.volume_quota_classes_client_latest = \
-                self.volume_v3.QuotaClassesClient()
-            self.volume_scheduler_stats_client_latest = \
-                self.volume_v3.SchedulerStatsClient()
-            self.volume_transfers_client_latest = \
-                self.volume_v3.TransfersClient()
-            self.volume_availability_zone_client_latest = \
-                self.volume_v3.AvailabilityZoneClient()
-            self.volume_limits_client_latest = self.volume_v3.LimitsClient()
-            self.volumes_client_latest = self.volume_v3.VolumesClient()
-            self.volumes_extension_client_latest = \
-                self.volume_v3.ExtensionsClient()
-            self.group_types_client_latest = self.volume_v3.GroupTypesClient()
-            self.groups_client_latest = self.volume_v3.GroupsClient()
-            self.group_snapshots_client_latest = \
-                self.volume_v3.GroupSnapshotsClient()
-            self.volume_messages_client_latest = (
-                self.volume_v3.MessagesClient())
-            self.volume_versions_client_latest = (
-                self.volume_v3.VersionsClient())
+        self.backups_client_latest = self.volume_v3.BackupsClient()
+        self.encryption_types_client_latest = \
+            self.volume_v3.EncryptionTypesClient()
+        self.snapshot_manage_client_latest = \
+            self.volume_v3.SnapshotManageClient()
+        self.snapshots_client_latest = self.volume_v3.SnapshotsClient()
+        self.volume_capabilities_client_latest = \
+            self.volume_v3.CapabilitiesClient()
+        self.volume_manage_client_latest = (
+            self.volume_v3.VolumeManageClient())
+        self.volume_qos_client_latest = self.volume_v3.QosSpecsClient()
+        self.volume_services_client_latest = (
+            self.volume_v3.ServicesClient())
+        self.volume_types_client_latest = self.volume_v3.TypesClient()
+        self.volume_hosts_client_latest = self.volume_v3.HostsClient()
+        self.volume_quotas_client_latest = self.volume_v3.QuotasClient()
+        self.volume_quota_classes_client_latest = \
+            self.volume_v3.QuotaClassesClient()
+        self.volume_scheduler_stats_client_latest = \
+            self.volume_v3.SchedulerStatsClient()
+        self.volume_transfers_client_latest = \
+            self.volume_v3.TransfersClient()
+        self.volume_transfers_mv355_client_latest = \
+            self.volume_v3.TransfersV355Client()
+        self.volume_availability_zone_client_latest = \
+            self.volume_v3.AvailabilityZoneClient()
+        self.volume_limits_client_latest = self.volume_v3.LimitsClient()
+        self.volumes_client_latest = self.volume_v3.VolumesClient()
+        self.volumes_extension_client_latest = \
+            self.volume_v3.ExtensionsClient()
+        self.group_types_client_latest = self.volume_v3.GroupTypesClient()
+        self.groups_client_latest = self.volume_v3.GroupsClient()
+        self.group_snapshots_client_latest = \
+            self.volume_v3.GroupSnapshotsClient()
+        self.volume_messages_client_latest = (
+            self.volume_v3.MessagesClient())
+        self.volume_versions_client_latest = (
+            self.volume_v3.VersionsClient())
+        self.attachments_client_latest = (
+            self.volume_v3.AttachmentsClient())
 
-            # TODO(gmann): Below alias for service clients have been
-            # deprecated and will be removed in future. Start using the alias
-            # defined above with suffix _latest.
-            # ****************Deprecated alias start from here***************
-            self.backups_v2_client = self.volume_v3.BackupsClient()
-            self.encryption_types_v2_client = \
-                self.volume_v3.EncryptionTypesClient()
-            self.snapshot_manage_v2_client = \
-                self.volume_v3.SnapshotManageClient()
-            self.snapshots_v2_client = self.volume_v3.SnapshotsClient()
-            self.volume_capabilities_v2_client = \
-                self.volume_v3.CapabilitiesClient()
-            self.volume_manage_v2_client = self.volume_v3.VolumeManageClient()
-            self.volume_qos_v2_client = self.volume_v3.QosSpecsClient()
-            self.volume_services_v2_client = self.volume_v3.ServicesClient()
-            self.volume_types_v2_client = self.volume_v3.TypesClient()
-            self.volume_hosts_v2_client = self.volume_v3.HostsClient()
-            self.volume_quotas_v2_client = self.volume_v3.QuotasClient()
-            self.volume_quota_classes_v2_client = \
-                self.volume_v3.QuotaClassesClient()
-            self.volume_scheduler_stats_v2_client = \
-                self.volume_v3.SchedulerStatsClient()
-            self.volume_transfers_v2_client = self.volume_v3.TransfersClient()
-            self.volume_v2_availability_zone_client = \
-                self.volume_v3.AvailabilityZoneClient()
-            self.volume_v2_limits_client = self.volume_v3.LimitsClient()
-            self.volumes_v2_client = self.volume_v3.VolumesClient()
-            self.volumes_v2_extension_client = \
-                self.volume_v3.ExtensionsClient()
+        # TODO(gmann): Below alias for service clients have been
+        # deprecated and will be removed in future. Start using the alias
+        # defined above with suffix _latest.
+        # ****************Deprecated alias start from here***************
+        self.backups_v2_client = self.volume_v3.BackupsClient()
+        self.encryption_types_v2_client = \
+            self.volume_v3.EncryptionTypesClient()
+        self.snapshot_manage_v2_client = \
+            self.volume_v3.SnapshotManageClient()
+        self.snapshots_v2_client = self.volume_v3.SnapshotsClient()
+        self.volume_capabilities_v2_client = \
+            self.volume_v3.CapabilitiesClient()
+        self.volume_manage_v2_client = self.volume_v3.VolumeManageClient()
+        self.volume_qos_v2_client = self.volume_v3.QosSpecsClient()
+        self.volume_services_v2_client = self.volume_v3.ServicesClient()
+        self.volume_types_v2_client = self.volume_v3.TypesClient()
+        self.volume_hosts_v2_client = self.volume_v3.HostsClient()
+        self.volume_quotas_v2_client = self.volume_v3.QuotasClient()
+        self.volume_quota_classes_v2_client = \
+            self.volume_v3.QuotaClassesClient()
+        self.volume_scheduler_stats_v2_client = \
+            self.volume_v3.SchedulerStatsClient()
+        self.volume_transfers_v2_client = self.volume_v3.TransfersClient()
+        self.volume_v2_availability_zone_client = \
+            self.volume_v3.AvailabilityZoneClient()
+        self.volume_v2_limits_client = self.volume_v3.LimitsClient()
+        self.volumes_v2_client = self.volume_v3.VolumesClient()
+        self.volumes_v2_extension_client = \
+            self.volume_v3.ExtensionsClient()
 
-        if CONF.volume_feature_enabled.api_v3:
-            self.backups_v3_client = self.volume_v3.BackupsClient()
-            self.group_types_v3_client = self.volume_v3.GroupTypesClient()
-            self.groups_v3_client = self.volume_v3.GroupsClient()
-            self.group_snapshots_v3_client = \
-                self.volume_v3.GroupSnapshotsClient()
-            self.snapshots_v3_client = self.volume_v3.SnapshotsClient()
-            self.volume_v3_messages_client = self.volume_v3.MessagesClient()
-            self.volume_v3_versions_client = self.volume_v3.VersionsClient()
-            self.volumes_v3_client = self.volume_v3.VolumesClient()
-            # ****************Deprecated alias end here***********************
+        self.backups_v3_client = self.volume_v3.BackupsClient()
+        self.group_types_v3_client = self.volume_v3.GroupTypesClient()
+        self.groups_v3_client = self.volume_v3.GroupsClient()
+        self.group_snapshots_v3_client = \
+            self.volume_v3.GroupSnapshotsClient()
+        self.snapshots_v3_client = self.volume_v3.SnapshotsClient()
+        self.volume_v3_messages_client = self.volume_v3.MessagesClient()
+        self.volume_v3_versions_client = self.volume_v3.VersionsClient()
+        self.volumes_v3_client = self.volume_v3.VolumesClient()
+        # ****************Deprecated alias end here***********************
 
     def _set_object_storage_clients(self):
         self.account_client = self.object_storage.AccountClient()
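
With the api_v2/api_v3 gating removed, the volume '*_latest' service clients
are always attached to the Manager, so callers no longer need to guard on the
volume API feature flags. A hedged usage sketch (the credentials object is
assumed to come from the usual credential provider machinery):

    from tempest import clients

    manager = clients.Manager(credentials)  # credentials assumed to be available
    volumes = manager.volumes_client_latest.list_volumes()['volumes']
    snapshots = manager.snapshots_client_latest.list_snapshots()['snapshots']
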
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 1535786..917262e 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -96,6 +96,7 @@
 To see help on specific argument, please do: ``tempest account-generator
 [OPTIONS] <accounts_file.yaml> -h``.
 """
+
 import argparse
 import os
 import traceback
@@ -199,6 +200,14 @@
     LOG.info('%s generated successfully!', account_file)
 
 
+def positive_int(number):
+    number = int(number)
+    if number <= 0:
+        raise argparse.ArgumentTypeError("Concurrency value should be a "
+                                         "positive number")
+    return number
+
+
 def _parser_add_args(parser):
     parser.add_argument('-c', '--config-file',
                         metavar='/etc/tempest.conf',
@@ -228,7 +237,7 @@
                         help='Resources tag')
     parser.add_argument('-r', '--concurrency',
                         default=1,
-                        type=int,
+                        type=positive_int,
                         required=False,
                         dest='concurrency',
                         help='Concurrency count')
@@ -248,21 +257,6 @@
                         help='Output accounts yaml file')
 
 
-def get_options():
-    usage_string = ('tempest account-generator [-h] <ARG> ...\n\n'
-                    'To see help on specific argument, do:\n'
-                    'tempest account-generator <ARG> -h')
-    parser = argparse.ArgumentParser(
-        description=DESCRIPTION,
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-        usage=usage_string
-    )
-
-    _parser_add_args(parser)
-    opts = parser.parse_args()
-    return opts
-
-
 class TempestAccountGenerator(command.Command):
 
     def get_parser(self, prog_name):
@@ -272,7 +266,19 @@
 
     def take_action(self, parsed_args):
         try:
-            main(parsed_args)
+            if parsed_args.config_file:
+                config.CONF.set_config_path(parsed_args.config_file)
+            setup_logging()
+            resources = []
+            for _ in range(parsed_args.concurrency):
+                # Use N different cred_providers to obtain different
+                # sets of creds
+                cred_provider = get_credential_provider(parsed_args)
+                resources.extend(generate_resources(cred_provider,
+                                                    parsed_args.admin))
+            dump_accounts(resources, parsed_args.identity_version,
+                          parsed_args.accounts)
+
         except Exception:
             LOG.exception("Failure generating test accounts.")
             traceback.print_exc()
@@ -280,26 +286,3 @@
 
     def get_description(self):
         return DESCRIPTION
-
-
-def main(opts=None):
-    log_warning = False
-    if not opts:
-        log_warning = True
-        opts = get_options()
-    if opts.config_file:
-        config.CONF.set_config_path(opts.config_file)
-    setup_logging()
-    if log_warning:
-        LOG.warning("Use of: 'tempest-account-generator' is deprecated, "
-                    "please use: 'tempest account-generator'")
-    resources = []
-    for count in range(opts.concurrency):
-        # Use N different cred_providers to obtain different sets of creds
-        cred_provider = get_credential_provider(opts)
-        resources.extend(generate_resources(cred_provider, opts.admin))
-    dump_accounts(resources, opts.identity_version, opts.accounts)
-
-
-if __name__ == "__main__":
-    main()
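
The new positive_int type hook makes argparse reject a bad --concurrency value
up front instead of silently looping zero (or a negative number of) times over
the credential providers. A standalone sketch of the same pattern (the parser
wiring here is illustrative):

    import argparse

    def positive_int(value):
        number = int(value)
        if number <= 0:
            raise argparse.ArgumentTypeError(
                'Concurrency value should be a positive number')
        return number

    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--concurrency', type=positive_int, default=1)

    parser.parse_args(['-r', '2'])    # accepted
    # parser.parse_args(['-r', '0'])  # exits with the ArgumentTypeError message
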
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index c54b16b..0b96d9e 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -123,6 +123,16 @@
             raise Exception(self.GOT_EXCEPTIONS)
 
     def init(self, parsed_args):
+        # set new handler for logging to stdout, by default only INFO messages
+        # are logged to stdout
+        stdout_handler = logging.logging.StreamHandler()
+        # debug argument is defined in cliff already
+        if self.app_args.debug:
+            stdout_handler.level = logging.DEBUG
+        else:
+            stdout_handler.level = logging.INFO
+        LOG.handlers.append(stdout_handler)
+
         cleanup_service.init_conf()
         self.options = parsed_args
         self.admin_mgr = clients.Manager(
@@ -149,7 +159,7 @@
         self._load_json()
 
     def _cleanup(self):
-        print("Begin cleanup")
+        LOG.info("Begin cleanup")
         is_dry_run = self.options.dry_run
         is_preserve = not self.options.delete_tempest_conf_objects
         is_save_state = False
@@ -167,7 +177,7 @@
                   'is_save_state': is_save_state}
         project_service = cleanup_service.ProjectService(admin_mgr, **kwargs)
         projects = project_service.list()
-        print("Process %s projects" % len(projects))
+        LOG.info("Processing %s projects", len(projects))
 
         # Loop through list of projects and clean them up.
         for project in projects:
@@ -179,10 +189,12 @@
                   'is_preserve': is_preserve,
                   'is_save_state': is_save_state,
                   'got_exceptions': self.GOT_EXCEPTIONS}
+        LOG.info("Processing global services")
         for service in self.global_services:
             svc = service(admin_mgr, **kwargs)
             svc.run()
 
+        LOG.info("Processing services")
         for service in self.resource_cleanup_services:
             svc = service(self.admin_mgr, **kwargs)
             svc.run()
@@ -193,7 +205,7 @@
                                    indent=2, separators=(',', ': ')))
 
     def _clean_project(self, project):
-        print("Cleaning project:  %s " % project['name'])
+        LOG.debug("Cleaning project:  %s ", project['name'])
         is_dry_run = self.options.dry_run
         dry_run_data = self.dry_run_data
         is_preserve = not self.options.delete_tempest_conf_objects
@@ -263,7 +275,7 @@
         return 'Cleanup after tempest run'
 
     def _init_state(self):
-        print("Initializing saved state.")
+        LOG.info("Initializing saved state.")
         data = {}
         admin_mgr = self.admin_mgr
         kwargs = {'data': data,
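
Moving the cleanup progress messages from print() onto the oslo.log logger
ties their verbosity to the --debug flag: the extra stream handler mirrors
INFO (or DEBUG) records to stdout while the regular log output is unchanged.
A rough standalone sketch of the same wiring with the stdlib logging module
(the logger name mirrors the one used by the cleanup code):

    import logging
    import sys

    LOG = logging.getLogger('tempest.cmd.cleanup')
    LOG.setLevel(logging.DEBUG)

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.INFO)   # switch to DEBUG when --debug is set
    LOG.addHandler(stdout_handler)

    LOG.info('Begin cleanup')                    # shown on stdout
    LOG.debug('Cleaning project: %s', 'demo')    # hidden unless the handler level is DEBUG
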
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 5b3b72a..f2370f3 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -12,6 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_log import log as logging
 
 from tempest import clients
@@ -22,7 +24,7 @@
 from tempest import config
 from tempest.lib import exceptions
 
-LOG = logging.getLogger(__name__)
+LOG = logging.getLogger('tempest.cmd.cleanup')
 CONF = config.CONF
 
 CONF_FLAVORS = None
@@ -166,6 +168,7 @@
         client = self.client
         for snap in snaps:
             try:
+                LOG.debug("Deleting Snapshot with id %s", snap['id'])
                 client.delete_snapshot(snap['id'])
             except Exception:
                 LOG.exception("Delete Snapshot %s exception.", snap['id'])
@@ -203,6 +206,7 @@
         servers = self.list()
         for server in servers:
             try:
+                LOG.debug("Deleting Server with id %s", server['id'])
                 client.delete_server(server['id'])
             except Exception:
                 LOG.exception("Delete Server %s exception.", server['id'])
@@ -235,6 +239,7 @@
         sgs = self.list()
         for sg in sgs:
             try:
+                LOG.debug("Deleting Server Group with id %s", sg['id'])
                 client.delete_server_group(sg['id'])
             except Exception:
                 LOG.exception("Delete Server Group %s exception.", sg['id'])
@@ -272,6 +277,7 @@
         for k in keypairs:
             name = k['keypair']['name']
             try:
+                LOG.debug("Deleting keypair %s", name)
                 client.delete_keypair(name)
             except Exception:
                 LOG.exception("Delete Keypair %s exception.", name)
@@ -308,6 +314,7 @@
         vols = self.list()
         for v in vols:
             try:
+                LOG.debug("Deleting volume with id %s", v['id'])
                 client.delete_volume(v['id'])
             except Exception:
                 LOG.exception("Delete Volume %s exception.", v['id'])
@@ -331,6 +338,8 @@
     def delete(self):
         client = self.client
         try:
+            LOG.debug("Deleting Volume Quotas for project with id %s",
+                      self.project_id)
             client.delete_quota_set(self.project_id)
         except Exception:
             LOG.exception("Delete Volume Quotas exception for 'project %s'.",
@@ -351,9 +360,11 @@
     def delete(self):
         client = self.client
         try:
+            LOG.debug("Deleting Nova Quotas for project with id %s",
+                      self.project_id)
             client.delete_quota_set(self.project_id)
         except Exception:
-            LOG.exception("Delete Quotas exception for 'project %s'.",
+            LOG.exception("Delete Nova Quotas exception for 'project %s'.",
                           self.project_id)
 
     def dry_run(self):
@@ -370,6 +381,8 @@
     def delete(self):
         client = self.client
         try:
+            LOG.debug("Deleting Network Quotas for project with id %s",
+                      self.project_id)
             client.reset_quotas(self.project_id)
         except Exception:
             LOG.exception("Delete Network Quotas exception for 'project %s'.",
@@ -418,7 +431,7 @@
         if self.is_preserve:
             networks = [network for network in networks
                         if network['id'] not in CONF_NETWORKS]
-        LOG.debug("List count, %s Networks", networks)
+        LOG.debug("List count, %s Networks", len(networks))
         return networks
 
     def delete(self):
@@ -426,6 +439,7 @@
         networks = self.list()
         for n in networks:
             try:
+                LOG.debug("Deleting Network with id %s", n['id'])
                 client.delete_network(n['id'])
             except Exception:
                 LOG.exception("Delete Network %s exception.", n['id'])
@@ -460,6 +474,8 @@
         flips = self.list()
         for flip in flips:
             try:
+                LOG.debug("Deleting Network Floating IP with id %s",
+                          flip['id'])
                 client.delete_floatingip(flip['id'])
             except Exception:
                 LOG.exception("Delete Network Floating IP %s exception.",
@@ -505,11 +521,14 @@
                      if net_info.is_router_interface_port(port)]
             for port in ports:
                 try:
+                    LOG.debug("Deleting port with id %s of router with id %s",
+                              port['id'], rid)
                     client.remove_router_interface(rid, port_id=port['id'])
                 except Exception:
                     LOG.exception("Delete Router Interface exception for "
                                   "'port %s' of 'router %s'.", port['id'], rid)
             try:
+                LOG.debug("Deleting Router with id %s", rid)
                 client.delete_router(rid)
             except Exception:
                 LOG.exception("Delete Router %s exception.", rid)
@@ -545,6 +564,8 @@
         rules = self.list()
         for rule in rules:
             try:
+                LOG.debug("Deleting Metering Label Rule with id %s",
+                          rule['id'])
                 client.delete_metering_label_rule(rule['id'])
             except Exception:
                 LOG.exception("Delete Metering Label Rule %s exception.",
@@ -581,6 +602,7 @@
         labels = self.list()
         for label in labels:
             try:
+                LOG.debug("Deleting Metering Label with id %s", label['id'])
                 client.delete_metering_label(label['id'])
             except Exception:
                 LOG.exception("Delete Metering Label %s exception.",
@@ -621,6 +643,7 @@
         ports = self.list()
         for port in ports:
             try:
+                LOG.debug("Deleting port with id %s", port['id'])
                 client.delete_port(port['id'])
             except Exception:
                 LOG.exception("Delete Port %s exception.", port['id'])
@@ -662,6 +685,7 @@
         secgroups = self.list()
         for secgroup in secgroups:
             try:
+                LOG.debug("Deleting security_group with id %s", secgroup['id'])
                 client.delete_security_group(secgroup['id'])
             except Exception:
                 LOG.exception("Delete security_group %s exception.",
@@ -698,6 +722,7 @@
         subnets = self.list()
         for subnet in subnets:
             try:
+                LOG.debug("Deleting subnet with id %s", subnet['id'])
                 client.delete_subnet(subnet['id'])
             except Exception:
                 LOG.exception("Delete Subnet %s exception.", subnet['id'])
@@ -733,6 +758,7 @@
         pools = self.list()
         for pool in pools:
             try:
+                LOG.debug("Deleting Subnet Pool with id %s", pool['id'])
                 client.delete_subnetpool(pool['id'])
             except Exception:
                 LOG.exception("Delete Subnet Pool %s exception.", pool['id'])
@@ -761,8 +787,10 @@
         if not self.is_save_state:
             regions = [region for region in regions['regions'] if region['id']
                        not in self.saved_state_json['regions'].keys()]
+            LOG.debug("List count, %s Regions", len(regions))
             return regions
         else:
+            LOG.debug("List count, %s Regions", len(regions['regions']))
             return regions['regions']
 
     def delete(self):
@@ -770,6 +798,7 @@
         regions = self.list()
         for region in regions:
             try:
+                LOG.debug("Deleting region with id %s", region['id'])
                 client.delete_region(region['id'])
             except Exception:
                 LOG.exception("Delete Region %s exception.", region['id'])
@@ -811,6 +840,7 @@
         flavors = self.list()
         for flavor in flavors:
             try:
+                LOG.debug("Deleting flavor with id %s", flavor['id'])
                 client.delete_flavor(flavor['id'])
             except Exception:
                 LOG.exception("Delete Flavor %s exception.", flavor['id'])
@@ -833,7 +863,15 @@
 
     def list(self):
         client = self.client
-        images = client.list_images(params={"all_tenants": True})['images']
+        response = client.list_images()
+        images = []
+        images.extend(response['images'])
+        while 'next' in response:
+            parsed = urllib.urlparse(response['next'])
+            marker = urllib.parse_qs(parsed.query)['marker'][0]
+            response = client.list_images(params={"marker": marker})
+            images.extend(response['images'])
+
         if not self.is_save_state:
             images = [image for image in images if image['id']
                       not in self.saved_state_json['images'].keys()]
@@ -848,6 +886,7 @@
         images = self.list()
         for image in images:
             try:
+                LOG.debug("Deleting image with id %s", image['id'])
                 client.delete_image(image['id'])
             except Exception:
                 LOG.exception("Delete Image %s exception.", image['id'])
@@ -891,6 +930,7 @@
         users = self.list()
         for user in users:
             try:
+                LOG.debug("Deleting user with id %s", user['id'])
                 self.client.delete_user(user['id'])
             except Exception:
                 LOG.exception("Delete User %s exception.", user['id'])
@@ -931,6 +971,7 @@
         roles = self.list()
         for role in roles:
             try:
+                LOG.debug("Deleting role with id %s", role['id'])
                 self.client.delete_role(role['id'])
             except Exception:
                 LOG.exception("Delete Role %s exception.", role['id'])
@@ -973,6 +1014,7 @@
         projects = self.list()
         for project in projects:
             try:
+                LOG.debug("Deleting project with id %s", project['id'])
                 self.client.delete_project(project['id'])
             except Exception:
                 LOG.exception("Delete project %s exception.", project['id'])
@@ -1009,6 +1051,7 @@
         domains = self.list()
         for domain in domains:
             try:
+                LOG.debug("Deleting domain with id %s", domain['id'])
                 client.update_domain(domain['id'], enabled=False)
                 client.delete_domain(domain['id'])
             except Exception:
@@ -1039,6 +1082,8 @@
         project_associated_services.append(NovaQuotaService)
     if IS_CINDER:
         project_associated_services.append(VolumeQuotaService)
+    if IS_NEUTRON:
+        project_associated_services.append(NetworkQuotaService)
     return project_associated_services
 
 
@@ -1067,7 +1112,6 @@
         resource_cleanup_services.append(NetworkService)
         resource_cleanup_services.append(NetworkSecGroupService)
         resource_cleanup_services.append(NetworkSubnetPoolsService)
-        resource_cleanup_services.append(NetworkQuotaService)
     if IS_CINDER:
         resource_cleanup_services.append(SnapshotService)
         resource_cleanup_services.append(VolumeService)
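The image listing change above replaces the single all_tenants call with
marker-based pagination that follows Glance's 'next' links. The same pattern,
reduced to a standalone sketch (the client is assumed to expose the
list_images() call used above)::

    from urllib import parse as urllib

    def list_all_images(client):
        response = client.list_images()
        images = list(response['images'])
        while 'next' in response:
            # 'next' looks like /v2/images?marker=<last-id>; extract the
            # marker from the query string and request the next page.
            parsed = urllib.urlparse(response['next'])
            marker = urllib.parse_qs(parsed.query)['marker'][0]
            response = client.list_images(params={'marker': marker})
            images.extend(response['images'])
        return images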
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
index d84f3a3..3eae552 100644
--- a/tempest/cmd/init.py
+++ b/tempest/cmd/init.py
@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import configparser
 import os
 import shutil
 import sys
@@ -19,7 +20,6 @@
 from cliff import command
 from oslo_config import generator
 from oslo_log import log as logging
-from six import moves
 from stestr import commands
 
 from tempest.cmd import workspace
@@ -44,11 +44,14 @@
 
     :return: default config dir
     """
+    # NOTE: The hard-coded global directory below only exists on a Linux box.
     global_conf_dir = '/etc/tempest'
     xdg_config = os.environ.get('XDG_CONFIG_HOME',
-                                os.path.expanduser('~/.config'))
+                                os.path.expanduser(os.path.join('~',
+                                                                '.config')))
     user_xdg_global_path = os.path.join(xdg_config, 'tempest')
-    user_global_path = os.path.join(os.path.expanduser('~'), '.tempest/etc')
+    user_global_path = os.path.join(os.path.expanduser('~'),
+                                    '.tempest', 'etc')
     if os.path.isdir(global_conf_dir):
         return global_conf_dir
     elif os.path.isdir(user_xdg_global_path):
@@ -89,7 +92,7 @@
             stestr_conf_file.write(stestr_conf)
 
     def get_configparser(self, conf_path):
-        config_parse = moves.configparser.ConfigParser()
+        config_parse = configparser.ConfigParser()
         config_parse.optionxform = str
         # get any existing values if a config file already exists
         if os.path.isfile(conf_path):
@@ -121,7 +124,7 @@
     def generate_sample_config(self, local_dir):
         conf_generator = os.path.join(os.path.dirname(__file__),
                                       'config-generator.tempest.conf')
-        output_file = os.path.join(local_dir, 'etc/tempest.conf.sample')
+        output_file = os.path.join(local_dir, 'etc', 'tempest.conf.sample')
         if os.path.isfile(conf_generator):
             generator.main(['--config-file', conf_generator, '--output-file',
                             output_file])
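The path changes above replace hard-coded '/' separators with os.path.join so
the config directory lookup stays portable. A sketch of the resulting lookup
order under the same directory names (illustrative only; it deliberately
ignores the case where none of the directories exists)::

    import os

    def default_config_dir():
        xdg_config = os.environ.get(
            'XDG_CONFIG_HOME',
            os.path.expanduser(os.path.join('~', '.config')))
        candidates = [
            '/etc/tempest',                                      # system-wide
            os.path.join(xdg_config, 'tempest'),                 # XDG location
            os.path.join(os.path.expanduser('~'), '.tempest', 'etc'),
        ]
        for path in candidates:
            if os.path.isdir(path):
                return path
        return candidates[-1]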
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index f9ca2c7..2669ff7 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -22,10 +22,10 @@
 * ``--regex/-r``: This is a selection regex like what stestr uses. It will run
   any tests that match on re.match() with the regex
 * ``--smoke/-s``: Run all the tests tagged as smoke
-* ``--black-regex``: It allows to do simple test exclusion via passing a
-  rejection/black regexp
+* ``--exclude-regex``: It allows you to do simple test exclusion by passing a
+  rejection/exclude regexp
 
-There are also the ``--blacklist-file`` and ``--whitelist-file`` options that
+There are also the ``--exclude-list`` and ``--include-list`` options that
 let you pass a filepath to tempest run with the file format being a line
 separated regex, with '#' used to signify the start of a comment on a line.
 For example::
@@ -47,6 +47,42 @@
 by removing unnecessary tests from a list file which is generated from
 ``--list-tests`` option.
 
+You can also use the ``--worker-file`` option to pass a filepath to a worker
+yaml file, allowing you to manually schedule the test run. For example, you
+can set up a tempest run with different concurrencies to be used with
+different regexps. An example of a worker file is shown below::
+
+    # YAML Worker file
+    - worker:
+      # you can have more than one regex per worker
+      - tempest.api.*
+      - neutron_tempest_tests
+    - worker:
+      - tempest.scenario.*
+
+This will run tests matching 'tempest.api.*' and 'neutron_tempest_tests'
+on worker 1, and tests matching 'tempest.scenario.*' on worker 2.
+
+You can mix manual scheduling with the standard scheduling mechanisms by
+setting the concurrency field on a worker. For example::
+
+    # YAML Worker file
+    - worker:
+      # you can have more than one regex per worker
+      - tempest.api.*
+      - neutron_tempest_tests
+      concurrency: 3
+    - worker:
+      - tempest.scenario.*
+      concurrency: 2
+
+This will run tests matching 'tempest.api.*' and 'neutron_tempest_tests'
+across 3 workers, and tests matching 'tempest.scenario.*' across 2 workers.
+
+This worker file is passed into stestr. For more details on how it
+operates, please refer to the stestr scheduling docs:
+https://stestr.readthedocs.io/en/stable/MANUAL.html#test-scheduling
+
 Test Execution
 ==============
 There are several options to control how the tests are executed. By default
@@ -92,8 +128,8 @@
 import sys
 
 from cliff import command
+from oslo_log import log
 from oslo_serialization import jsonutils as json
-import six
 from stestr import commands
 
 from tempest import clients
@@ -103,13 +139,11 @@
 from tempest.common import credentials_factory as credentials
 from tempest import config
 
-if six.PY2:
-    # Python 2 has not FileNotFoundError exception
-    FileNotFoundError = IOError
-
 CONF = config.CONF
 SAVED_STATE_JSON = "saved_state.json"
 
+LOG = log.getLogger(__name__)
+
 
 class TempestRun(command.Command):
 
@@ -131,7 +165,7 @@
         # environment variable and fall back to "python", under python3
         # if it does not exist. we should set it to the python3 executable
         # to deal with this situation better for now.
-        if six.PY3 and 'PYTHON' not in os.environ:
+        if 'PYTHON' not in os.environ:
             os.environ['PYTHON'] = sys.executable
 
     def _create_stestr_conf(self):
@@ -170,22 +204,71 @@
             self._init_state()
 
         regex = self._build_regex(parsed_args)
+
+        # temporary method for parsing deprecated and new stestr options
+        # and showing warning messages in order to make the transition
+        # smoother for all tempest consumers
+        # TODO(kopecmartin) remove this after stestr>=3.1.0 is used
+        # in all supported OpenStack releases
+        def parse_dep(old_o, old_v, new_o, new_v):
+            ret = ''
+            if old_v:
+                LOG.warning("'%s' option is deprecated, use '%s' instead "
+                            "which is functionally equivalent. Right now "
+                            "Tempest still supports this option for "
+                            "backward compatibility, however, it will be "
+                            "removed soon.",
+                            old_o, new_o)
+                ret = old_v
+            if old_v and new_v:
+                # both options are specified
+                LOG.warning("'%s' and '%s' are specified at the same time, "
+                            "'%s' takes precedence over '%s'",
+                            new_o, old_o, new_o, old_o)
+            if new_v:
+                ret = new_v
+            return ret
+        ex_regex = parse_dep('--black-regex', parsed_args.black_regex,
+                             '--exclude-regex', parsed_args.exclude_regex)
+        ex_list = parse_dep('--blacklist-file', parsed_args.blacklist_file,
+                            '--exclude-list', parsed_args.exclude_list)
+        in_list = parse_dep('--whitelist-file', parsed_args.whitelist_file,
+                            '--include-list', parsed_args.include_list)
+
         return_code = 0
         if parsed_args.list_tests:
-            return_code = commands.list_command(
-                filters=regex, whitelist_file=parsed_args.whitelist_file,
-                blacklist_file=parsed_args.blacklist_file,
-                black_regex=parsed_args.black_regex)
+            try:
+                return_code = commands.list_command(
+                    filters=regex, include_list=in_list,
+                    exclude_list=ex_list, exclude_regex=ex_regex)
+            except TypeError:
+                # exclude_list, include_list and exclude_regex are defined only
+                # in stestr >= 3.1.0, this except block catches the case when
+                # tempest is executed with an older stestr
+                return_code = commands.list_command(
+                    filters=regex, whitelist_file=in_list,
+                    blacklist_file=ex_list, black_regex=ex_regex)
 
         else:
             serial = not parsed_args.parallel
-            return_code = commands.run_command(
-                filters=regex, subunit_out=parsed_args.subunit,
-                serial=serial, concurrency=parsed_args.concurrency,
-                blacklist_file=parsed_args.blacklist_file,
-                whitelist_file=parsed_args.whitelist_file,
-                black_regex=parsed_args.black_regex,
-                load_list=parsed_args.load_list, combine=parsed_args.combine)
+            params = {
+                'filters': regex, 'subunit_out': parsed_args.subunit,
+                'serial': serial, 'concurrency': parsed_args.concurrency,
+                'worker_path': parsed_args.worker_file,
+                'load_list': parsed_args.load_list,
+                'combine': parsed_args.combine
+            }
+            try:
+                return_code = commands.run_command(
+                    **params, exclude_list=ex_list,
+                    include_list=in_list, exclude_regex=ex_regex)
+            except TypeError:
+                # exclude_list, include_list and exclude_regex are defined only
+                # in stestr >= 3.1.0, this except block catches the case when
+                # tempest is executed with an older stestr
+                return_code = commands.run_command(
+                    **params, blacklist_file=ex_list,
+                    whitelist_file=in_list, black_regex=ex_regex)
             if return_code > 0:
                 sys.exit(return_code)
         return return_code
@@ -239,21 +322,48 @@
                            help='A normal stestr selection regex used to '
                                 'specify a subset of tests to run')
         parser.add_argument('--black-regex', dest='black_regex',
+                            help='DEPRECATED: This option is deprecated and '
+                                 'will be removed soon, use --exclude-regex '
+                                 'which is functionally equivalent. If this '
+                                 'is specified at the same time as '
+                                 '--exclude-regex, this flag will be ignored '
+                                 'and --exclude-regex will be used')
+        parser.add_argument('--exclude-regex', dest='exclude_regex',
                             help='A regex to exclude tests that match it')
         parser.add_argument('--whitelist-file', '--whitelist_file',
-                            help="Path to a whitelist file, this file "
-                            "contains a separate regex on each "
-                            "newline.")
+                            help='DEPRECATED: This option is deprecated and '
+                                 'will be removed soon, use --include-list '
+                                 'which is functionally equivalent. If this '
+                                 'is specified at the same time as '
+                                 '--include-list, this flag will be ignored '
+                                 'and --include-list will be used')
+        parser.add_argument('--include-list', '--include_list',
+                            help="Path to an include file which contains the "
+                                 "regex for tests to be included in tempest "
+                                 "run, this file contains a separate regex on "
+                                 "each newline.")
         parser.add_argument('--blacklist-file', '--blacklist_file',
-                            help='Path to a blacklist file, this file '
-                                 'contains a separate regex exclude on '
-                                 'each newline')
+                            help='DEPRECATED: This option is deprecated and '
+                                 'will be removed soon, use --exclude-list '
+                                 'which is functionally equivalent. If this '
+                                 'is specified at the same time as '
+                                 '--exclude-list, this flag will be ignored '
+                                 'and --exclude-list will be used')
+        parser.add_argument('--exclude-list', '--exclude_list',
+                            help='Path to an exclude file which contains the '
+                                 'regex for tests to be excluded from a '
+                                 'tempest run; this file contains a separate '
+                                 'regex on each newline.')
         parser.add_argument('--load-list', '--load_list',
                             help='Path to a non-regex whitelist file, '
                                  'this file contains a separate test '
                                  'on each newline. This command '
                                  'supports files created by the tempest '
                                  'run ``--list-tests`` command')
+        parser.add_argument('--worker-file', '--worker_file',
+                            help='Optional path to a worker file. This file '
+                            'contains each worker configuration to be '
+                            'used to schedule the test run')
         # list only args
         parser.add_argument('--list-tests', '-l', action='store_true',
                             help='List tests',
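The try/except TypeError blocks above are how tempest run stays compatible
with both old and new stestr keyword names during the deprecation window.
The shape of that fallback as a standalone sketch (argument values are
placeholders)::

    from stestr import commands

    def run_tests(regex, exclude_list=None):
        params = {'filters': regex, 'serial': False}
        try:
            # stestr >= 3.1.0 understands the new keyword names.
            return commands.run_command(**params, exclude_list=exclude_list)
        except TypeError:
            # Older stestr releases only accept the deprecated names.
            return commands.run_command(**params, blacklist_file=exclude_list)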
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index e029538..6c36d82 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -30,6 +30,8 @@
 * ``--ports, -p``: (Optional) The path to a JSON file describing the ports
   being used by different services
 * ``--verbose, -v``: (Optional) Print Request and Response Headers and Body
+  data to stdout in the deprecated non-cliff CLI
+* ``--all-stdout, -a``: (Optional) Print Request and Response Headers and Body
   data to stdout
 
 
@@ -38,12 +40,14 @@
 
 subunit-describe-calls will take in either stdin subunit v1 or v2 stream or a
 file path which contains either a subunit v1 or v2 stream passed via the
---subunit parameter. This is then parsed checking for details contained in the
-file_bytes of the --non-subunit-name parameter (the default is pythonlogging
-which is what Tempest uses to store logs). By default the OpenStack Kilo
-release port defaults (http://bit.ly/22jpF5P) are used unless a file is
-provided via the --ports option. The resulting output is dumped in JSON output
-to the path provided in the --output-file option.
+``--subunit`` parameter. This is then parsed checking for details contained in
+the file_bytes of the ``--non-subunit-name`` parameter (the default is
+pythonlogging which is what Tempest uses to store logs). By default `the
+OpenStack default ports
+<https://docs.openstack.org/install-guide/firewalls-default-ports.html>`_
+are used unless a file is provided via the ``--ports`` option. The resulting
+output is dumped in JSON output to the path provided in the ``--output-file``
+option.
 
 Ports file JSON structure
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -108,9 +112,8 @@
     response_re = re.compile(r'.* Response - Headers: (?P<headers>.*)')
     body_re = re.compile(r'.*Body: (?P<body>.*)')
 
-    # Based on newton defaults:
-    # http://docs.openstack.org/newton/config-reference/
-    # firewalls-default-ports.html
+    # Based on OpenStack default ports:
+    # https://docs.openstack.org/install-guide/firewalls-default-ports.html
     services = {
         "8776": "Block Storage",
         "8774": "Nova",
@@ -129,7 +132,10 @@
         "3260": "iSCSI",
         "3306": "MySQL",
         "5672": "AMQP",
-        "8082": "murano"}
+        "8082": "murano",
+        "8778": "Clustering",
+        "8999": "Vitrage",
+        "8989": "Mistral"}
 
     def __init__(self, services=None):
         super(UrlParser, self).__init__()
@@ -278,7 +284,7 @@
     return url_parser
 
 
-def output(url_parser, output_file, verbose):
+def output(url_parser, output_file, all_stdout):
     if output_file is not None:
         with open(output_file, "w") as outfile:
             outfile.write(json.dumps(url_parser.test_logs))
@@ -294,7 +300,7 @@
             sys.stdout.write('\t- {0} {1} request for {2} to {3}\n'.format(
                 item.get('status_code'), item.get('verb'),
                 item.get('service'), item.get('url')))
-            if verbose:
+            if all_stdout:
                 sys.stdout.write('\t\t- request headers: {0}\n'.format(
                     item.get('request_headers')))
                 sys.stdout.write('\t\t- request body: {0}\n'.format(
@@ -313,7 +319,7 @@
               "please use: 'tempest subunit-describe-calls'")
         cl_args = ArgumentParser().parse_args()
     parser = parse(cl_args.subunit, cl_args.non_subunit_name, cl_args.ports)
-    output(parser, cl_args.output_file, cl_args.verbose)
+    output(parser, cl_args.output_file, cl_args.all_stdout)
 
 
 def _parser_add_args(parser):
@@ -339,9 +345,23 @@
         help="A JSON file describing the ports for each service."
     )
 
-    parser.add_argument(
-        "-v", "--verbose", action='store_true', default=False,
-        help="Add Request and Response header and body data to stdout."
+    group = parser.add_mutually_exclusive_group()
+    # The -v and --verbose options are for the old subunit-describe-calls
+    # main() CLI interface. They do not work with the new
+    # tempest subunit-describe-calls CLI, so when the main CLI approach is
+    # deleted this argument is no longer needed.
+    group.add_argument(
+        "-v", "--verbose", action='store_true', dest='all_stdout',
+        help='Add Request and Response header and body data to stdout.'
+             ' NOTE: This argument is deprecated and does not work with the'
+             ' tempest subunit-describe-calls CLI.'
+             ' Use the new option: "-a", "--all-stdout"'
+    )
+    group.add_argument(
+        "-a", "--all-stdout", action='store_true',
+        help="Add Request and Response header and body data to stdout print."
+             " Note: this argument work with the subunit-describe-calls and"
+             " tempest subunit-describe-calls CLI commands."
     )
 
 
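The mutually exclusive group above lets the deprecated -v/--verbose flag and
the new -a/--all-stdout flag share a single destination without both being
accepted at once. The argparse pattern on its own (option strings copied from
above, everything else illustrative)::

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-v', '--verbose', action='store_true',
                       dest='all_stdout', help='deprecated alias')
    group.add_argument('-a', '--all-stdout', action='store_true',
                       help='print request/response data to stdout')

    print(parser.parse_args(['-v']).all_stdout)   # True
    print(parser.parse_args(['-a']).all_stdout)   # True
    # Passing both '-v' and '-a' makes argparse exit with an error.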
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 8d5bdbd..0db1ab1 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -60,16 +60,16 @@
 """
 
 import argparse
+import configparser
 import os
 import re
 import sys
 import traceback
+from urllib import parse as urlparse
 
 from cliff import command
 from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
-from six import moves
-from six.moves.urllib import parse as urlparse
 
 from tempest import clients
 from tempest.common import credentials_factory as credentials
@@ -202,22 +202,8 @@
                             not CONF.identity_feature_enabled.api_v3, update)
 
 
-def verify_cinder_api_versions(os, update):
-    # Check cinder api versions
-    versions = _get_api_versions(os, 'cinder')
-    if (CONF.volume_feature_enabled.api_v2 !=
-            contains_version('v2.', versions)):
-        print_and_or_update('api_v2', 'volume-feature-enabled',
-                            not CONF.volume_feature_enabled.api_v2, update)
-    if (CONF.volume_feature_enabled.api_v3 !=
-            contains_version('v3.', versions)):
-        print_and_or_update('api_v3', 'volume-feature-enabled',
-                            not CONF.volume_feature_enabled.api_v3, update)
-
-
 def verify_api_versions(os, service, update):
     verify = {
-        'cinder': verify_cinder_api_versions,
         'glance': verify_glance_api_versions,
         'keystone': verify_keystone_api_versions,
     }
@@ -439,9 +425,9 @@
 
     if update:
         conf_file = _get_config_file()
-        CONF_PARSER = moves.configparser.ConfigParser()
+        CONF_PARSER = configparser.ConfigParser()
         CONF_PARSER.optionxform = str
-        CONF_PARSER.readfp(conf_file)
+        CONF_PARSER.read_file(conf_file)
 
     # Indicate not to create network resources as part of getting credentials
     net_resources = {
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index cd85ede..a062f6f 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -18,9 +18,7 @@
 import ssl
 import struct
 import textwrap
-
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from oslo_log import log as logging
 from oslo_utils import excutils
@@ -31,11 +29,6 @@
 from tempest.lib.common import rest_client
 from tempest.lib.common.utils import data_utils
 
-if six.PY2:
-    ord_func = ord
-else:
-    ord_func = int
-
 CONF = config.CONF
 
 LOG = logging.getLogger(__name__)
@@ -64,7 +57,7 @@
 def create_test_server(clients, validatable=False, validation_resources=None,
                        tenant_network=None, wait_until=None,
                        volume_backed=False, name=None, flavor=None,
-                       image_id=None, **kwargs):
+                       image_id=None, wait_for_sshable=True, **kwargs):
     """Common wrapper utility returning a test server.
 
     This method is a common wrapper returning a test server that can be
@@ -100,6 +93,8 @@
         CONF.compute.flavor_ref will be used instead.
     :param image_id: ID of the image to be used to provision the server. If not
         defined, CONF.compute.image_ref will be used instead.
+    :param wait_for_sshable: Check the server's console log and wait until
+        it is ready to log in.
     :returns: a tuple
     """
 
@@ -270,6 +265,10 @@
                             LOG.exception('Server %s failed to delete in time',
                                           server['id'])
 
+    if (validatable and CONF.compute_feature_enabled.console_output and
+            wait_for_sshable):
+        waiters.wait_for_guest_os_boot(clients.servers_client, server['id'])
+
     return body, servers
 
 
@@ -365,8 +364,8 @@
             # frames less than 125 bytes here (for the negotiation) and
             # that only the 2nd byte contains the length, and since the
             # server doesn't do masking, we can just read the data length
-            if ord_func(header[1]) & 127 > 0:
-                return self._recv(ord_func(header[1]) & 127)
+            if int(header[1]) & 127 > 0:
+                return self._recv(int(header[1]) & 127)
 
     def send_frame(self, data):
         """Wrapper for sending data to add in the WebSocket frame format."""
@@ -383,7 +382,7 @@
             frame_bytes.append(mask[i])
         # Mask each of the actual data bytes that we are going to send
         for i in range(len(data)):
-            frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
+            frame_bytes.append(int(data[i]) ^ mask[i % 4])
         # Convert our integer list to a binary array of bytes
         frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
         self._socket.sendall(frame_bytes)
@@ -400,9 +399,24 @@
         """Upgrade the HTTP connection to a WebSocket and verify."""
         # It is possible to pass the path as a query parameter in the request,
         # so use it if present
+        # Given noVNC format
+        # https://x.com/vnc_auto.html?path=%3Ftoken%3Dxxx,
+        # url format is
+        # ParseResult(scheme='https', netloc='x.com',
+        # path='/vnc_auto.html', params='',
+        # query='path=%3Ftoken%3Dxxx', fragment='').
+        # qparams format is {'path': ['?token=xxx']}
         qparams = urlparse.parse_qs(url.query)
-        path = qparams['path'][0] if 'path' in qparams else '/websockify'
-        reqdata = 'GET %s HTTP/1.1\r\n' % path
+        # According to the references
+        # https://docs.python.org/3/library/urllib.parse.html
+        # https://tools.ietf.org/html/rfc3986#section-3.4
+        # the qparams['path'][0] format is '?token=xxx' without a / prefix,
+        # so remove the / in /websockify to comply with the references.
+        path = qparams['path'][0] if 'path' in qparams else 'websockify'
+        # Fix websocket request format by adding / prefix.
+        # Updated request format: GET /?token=xxx HTTP/1.1
+        # or GET /websockify HTTP/1.1
+        reqdata = 'GET /%s HTTP/1.1\r\n' % path
         reqdata += 'Host: %s' % url.hostname
         # Add port only if we have one specified
         if url.port:
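The comment block added to the WebSocket upgrade above can be confirmed with a
quick worked example; the token value is made up::

    from urllib import parse as urlparse

    url = urlparse.urlparse('https://x.com/vnc_auto.html?path=%3Ftoken%3Dxxx')
    qparams = urlparse.parse_qs(url.query)
    # qparams == {'path': ['?token=xxx']}: parse_qs decodes %3F back to '?'
    path = qparams['path'][0] if 'path' in qparams else 'websockify'
    reqdata = 'GET /%s HTTP/1.1\r\n' % path
    # reqdata now starts with 'GET /?token=xxx HTTP/1.1'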
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index c6e5dcb..2d486a7 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -245,6 +245,9 @@
 
     if identity_version == 'v3':
         conf_attributes.append('domain_name')
+        conf_attributes.append('user_domain_name')
+        conf_attributes.append('project_domain_name')
+        conf_attributes.append('system')
     # Read the parts of credentials from config
     params = config.service_client_config()
     for attr in conf_attributes:
@@ -284,7 +287,8 @@
     if identity_version == 'v3':
         domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
                             if 'domain' in x)
-        if not domain_fields.intersection(kwargs.keys()):
+        if (not params.get('system') and
+                not domain_fields.intersection(kwargs.keys())):
             domain_name = CONF.auth.default_credentials_domain_name
             # NOTE(andreaf) Setting domain_name implicitly sets user and
             # project domain names, if they are None
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index c702d88..b0bf5b2 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -62,8 +62,9 @@
         # [1] https://bugs.launchpad.net/swift/+bug/1537811
         # [2] http://tracker.ceph.com/issues/13582
         if ('content-length' not in actual and
+                'transfer-encoding' not in actual and
                 self._content_length_required(actual)):
-            return NonExistentHeader('content-length')
+            return NonExistentHeaders(['content-length', 'transfer-encoding'])
         if 'content-type' not in actual:
             return NonExistentHeader('content-type')
         if 'x-trans-id' not in actual:
@@ -75,8 +76,6 @@
         if self.method == 'GET' or self.method == 'HEAD':
             if 'x-timestamp' not in actual:
                 return NonExistentHeader('x-timestamp')
-            if 'accept-ranges' not in actual:
-                return NonExistentHeader('accept-ranges')
             if self.target == 'Account':
                 if 'x-account-bytes-used' not in actual:
                     return NonExistentHeader('x-account-bytes-used')
@@ -192,6 +191,19 @@
         return {}
 
 
+class NonExistentHeaders(object):
+    """Informs an error message in the case of missing certain headers"""
+
+    def __init__(self, headers):
+        self.headers = headers
+
+    def describe(self):
+        return "none of these headers exist: %s" % self.headers
+
+    def get_details(self):
+        return {}
+
+
 class InvalidHeaderValue(object):
     """Informs an error message when a header contains a bad value"""
 
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 167bf5b..88a16b7 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -13,37 +13,17 @@
 #    under the License.
 
 import functools
-from functools import partial
 
 import testtools
 
 from tempest import config
 from tempest.exceptions import InvalidServiceTag
-from tempest.lib.common.utils import data_utils as lib_data_utils
 from tempest.lib import decorators
 
 
 CONF = config.CONF
 
 
-class DataUtils(object):
-    def __getattr__(self, attr):
-
-        if attr == 'rand_name':
-            # NOTE(flwang): This is a proxy to generate a random name that
-            # includes a random number and a prefix 'tempest'
-            attr_obj = partial(lib_data_utils.rand_name,
-                               prefix='tempest')
-        else:
-            attr_obj = getattr(lib_data_utils, attr)
-
-        self.__dict__[attr] = attr_obj
-        return attr_obj
-
-
-data_utils = DataUtils()
-
-
 def get_service_list():
     service_list = {
         'compute': CONF.service_available.nova,
@@ -59,6 +39,7 @@
         # So we should set this True here.
         'identity': True,
         'object_storage': CONF.service_available.swift,
+        'dashboard': CONF.service_available.horizon,
     }
     return service_list
 
@@ -128,3 +109,18 @@
     if extension_name in config_dict[service]:
         return True
     return False
+
+
+def is_network_feature_enabled(feature_name):
+    """A function that will check the list of available network features
+
+    """
+    list_of_features = CONF.network_feature_enabled.available_features
+
+    if not list_of_features:
+        return False
+    if list_of_features[0] == 'all':
+        return True
+    if feature_name in list_of_features:
+        return True
+    return False
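The lookup in is_network_feature_enabled() above boils down to a small
membership test on the configured list. Detached from tempest's CONF object
(feature names are illustrative)::

    def is_feature_enabled(feature_name, available_features):
        if not available_features:
            return False
        if available_features[0] == 'all':
            return True
        return feature_name in available_features

    print(is_feature_enabled('port-security', []))              # False
    print(is_feature_enabled('port-security', ['all']))         # True
    print(is_feature_enabled('port-security', ['qos']))         # False
    print(is_feature_enabled('qos', ['qos', 'port-security']))  # True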
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index b68a879..5d6e129 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -108,7 +108,7 @@
         LOG.debug('(get_nic_name_by_ip) Command result: %s', nic)
         return nic.strip().strip(":").split('@')[0].lower()
 
-    def get_dns_servers(self):
+    def _get_dns_servers(self):
         cmd = 'cat /etc/resolv.conf'
         resolve_file = self.exec_command(cmd).strip().split('\n')
         entries = (l.split() for l in resolve_file)
@@ -116,6 +116,19 @@
                        if len(l) and l[0] == 'nameserver']
         return dns_servers
 
+    def get_dns_servers(self, timeout=5):
+        start_time = int(time.time())
+        dns_servers = []
+        while True:
+            dns_servers = self._get_dns_servers()
+            if dns_servers:
+                break
+            LOG.debug("DNS Servers list empty.")
+            if int(time.time()) - start_time >= timeout:
+                LOG.debug("DNS Servers list empty after %s.", timeout)
+                break
+        return dns_servers
+
     def _renew_lease_udhcpc(self, fixed_ip=None):
         """Renews DHCP lease via udhcpc client. """
         file_path = '/var/run/udhcpc.'
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index b547cc6..1b69349 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -125,6 +125,18 @@
                 "Server %s failed to delete and is in ERROR status" %
                 server_id)
 
+        if server_status == 'SOFT_DELETED':
+            # Soft-deleted instances need to be forcibly deleted to
+            # prevent some test cases from failing.
+            LOG.debug("Automatically force-deleting soft-deleted server %s",
+                      server_id)
+            try:
+                client.force_delete_server(server_id)
+            except lib_exc.NotFound:
+                # The instance may have been deleted so ignore
+                # NotFound exception
+                return
+
         if int(time.time()) - start_time >= client.build_timeout:
             raise lib_exc.TimeoutException
         old_status = server_status
@@ -181,6 +193,91 @@
     raise lib_exc.TimeoutException(message)
 
 
+def wait_for_image_tasks_status(client, image_id, status):
+    """Waits for an image tasks to reach a given status."""
+    pending_tasks = []
+    start = int(time.time())
+    while int(time.time()) - start < client.build_timeout:
+        tasks = client.show_image_tasks(image_id)['tasks']
+
+        pending_tasks = [task for task in tasks if task['status'] != status]
+        if not pending_tasks:
+            return tasks
+        time.sleep(client.build_interval)
+
+    message = ('Image %(image_id)s tasks: %(pending_tasks)s '
+               'failed to reach %(status)s state within the required '
+               'time (%(timeout)s s).' % {'image_id': image_id,
+                                          'pending_tasks': pending_tasks,
+                                          'status': status,
+                                          'timeout': client.build_timeout})
+    caller = test_utils.find_test_caller()
+    if caller:
+        message = '(%s) %s' % (caller, message)
+    raise lib_exc.TimeoutException(message)
+
+
+def wait_for_image_imported_to_stores(client, image_id, stores=None):
+    """Waits for an image to be imported to all requested stores.
+
+    Short circuits to fail if the server reports failure of any store.
+    If stores is None, just wait for status==active.
+
+    The client should also have build_interval and build_timeout attributes.
+    """
+
+    exc_cls = lib_exc.TimeoutException
+    start = int(time.time())
+    while int(time.time()) - start < client.build_timeout:
+        image = client.show_image(image_id)
+        if image['status'] == 'active' and (stores is None or
+                                            image['stores'] == stores):
+            return
+        if image.get('os_glance_failed_import'):
+            exc_cls = lib_exc.OtherRestClientException
+            break
+
+        time.sleep(client.build_interval)
+
+    message = ('Image %s failed to import on stores: %s' %
+               (image_id, str(image.get('os_glance_failed_import'))))
+    caller = test_utils.find_test_caller()
+    if caller:
+        message = '(%s) %s' % (caller, message)
+    raise exc_cls(message)
+
+
+def wait_for_image_copied_to_stores(client, image_id):
+    """Waits for an image to be copied on all requested stores.
+
+    The client should also have build_interval and build_timeout attributes.
+    This returns the list of stores where the copy failed.
+    """
+
+    start = int(time.time())
+    store_left = []
+    while int(time.time()) - start < client.build_timeout:
+        image = client.show_image(image_id)
+        store_left = image.get('os_glance_importing_to_stores')
+        # NOTE(danms): If os_glance_importing_to_stores is None, then
+        # we've raced with the startup of the task and should continue
+        # to wait.
+        if store_left is not None and not store_left:
+            return image['os_glance_failed_import']
+        if image['status'].lower() == 'killed':
+            raise exceptions.ImageKilledException(image_id=image_id,
+                                                  status=image['status'])
+
+        time.sleep(client.build_interval)
+
+    message = ('Image %s failed to finish the copy operation '
+               'on stores: %s' % (image_id, str(store_left)))
+    caller = test_utils.find_test_caller()
+    if caller:
+        message = '(%s) %s' % (caller, message)
+    raise lib_exc.TimeoutException(message)
+
+
 def wait_for_volume_resource_status(client, resource_id, status):
     """Waits for a volume resource to reach a given status.
 
@@ -217,6 +314,25 @@
              resource_name, resource_id, status, time.time() - start)
 
 
+def wait_for_volume_attachment_create(client, volume_id, server_id):
+    """Waits for a volume attachment to be created at a given volume."""
+    start = int(time.time())
+    while True:
+        attachments = client.show_volume(volume_id)['volume']['attachments']
+        found = [a for a in attachments if a['server_id'] == server_id]
+        if found:
+            LOG.info('Attachment %s created for volume %s to server %s after '
+                     'waiting for %f seconds', found[0]['attachment_id'],
+                     volume_id, server_id, time.time() - start)
+            return found[0]
+        time.sleep(client.build_interval)
+        if int(time.time()) - start >= client.build_timeout:
+            message = ('Failed to attach volume %s to server %s '
+                       'within the required time (%s s).' %
+                       (volume_id, server_id, client.build_timeout))
+            raise lib_exc.TimeoutException(message)
+
+
 def wait_for_volume_attachment_remove(client, volume_id, attachment_id):
     """Waits for a volume attachment to be removed from a given volume."""
     start = int(time.time())
@@ -224,7 +340,7 @@
     while any(attachment_id == a['attachment_id'] for a in attachments):
         time.sleep(client.build_interval)
         if int(time.time()) - start >= client.build_timeout:
-            message = ('Failed to remove attachment %s from volume %s'
+            message = ('Failed to remove attachment %s from volume %s '
                        'within the required time (%s s).' %
                        (attachment_id, volume_id, client.build_timeout))
             raise lib_exc.TimeoutException(message)
@@ -233,6 +349,45 @@
              'seconds', attachment_id, volume_id, time.time() - start)
 
 
+def wait_for_volume_attachment_remove_from_server(
+        client, server_id, volume_id):
+    """Waits for a volume to be removed from a given server.
+
+    This waiter checks the compute API if the volume attachment is removed.
+    """
+    start = int(time.time())
+
+    try:
+        volumes = client.list_volume_attachments(
+            server_id)['volumeAttachments']
+    except lib_exc.NotFound:
+        # Ignore 404s on detach in case the server is deleted or the volume
+        # is already detached.
+        return
+
+    while any(volume for volume in volumes if volume['volumeId'] == volume_id):
+        time.sleep(client.build_interval)
+
+        timed_out = int(time.time()) - start >= client.build_timeout
+        if timed_out:
+            console_output = client.get_console_output(server_id)['output']
+            LOG.debug('Console output for %s\nbody=\n%s',
+                      server_id, console_output)
+            message = ('Volume %s failed to detach from server %s within '
+                       'the required time (%s s) from the compute API '
+                       'perspective' %
+                       (volume_id, server_id, client.build_timeout))
+            raise lib_exc.TimeoutException(message)
+        try:
+            volumes = client.list_volume_attachments(
+                server_id)['volumeAttachments']
+        except lib_exc.NotFound:
+            # Ignore 404s on detach in case the server is deleted or the volume
+            # is already detached.
+            return
+    return
+
+
 def wait_for_volume_migration(client, volume_id, new_host):
     """Waits for a Volume to move to a new host."""
     body = client.show_volume(volume_id)['volume']
@@ -334,18 +489,34 @@
     return body
 
 
-def wait_for_interface_detach(client, server_id, port_id):
+def wait_for_interface_detach(client, server_id, port_id, detach_request_id):
     """Waits for an interface to be detached from a server."""
-    body = client.list_interfaces(server_id)['interfaceAttachments']
-    ports = [iface['port_id'] for iface in body]
+    def _get_detach_event_results():
+        # NOTE(gibi): The obvious choice for this waiter would be to wait
+        # until the interface disappears from the client.list_interfaces()
+        # response. However, that response is based on the binding status of
+        # the port in Neutron. Nova deallocates the port resources _after_ the
+        # port was unbound in Neutron. This can cause the naive waiter to
+        # return before the port is fully deallocated. Wait instead for the
+        # os-instance-action to succeed, as that is recorded only after the
+        # port is fully deallocated.
+        events = client.show_instance_action(
+            server_id, detach_request_id)['instanceAction'].get('events', [])
+        return [
+            event['result'] for event in events
+            if event['event'] == 'compute_detach_interface'
+        ]
+
+    detach_event_results = _get_detach_event_results()
+
     start = int(time.time())
 
-    while port_id in ports:
+    while "Success" not in detach_event_results:
         time.sleep(client.build_interval)
-        body = client.list_interfaces(server_id)['interfaceAttachments']
-        ports = [iface['port_id'] for iface in body]
-        if port_id not in ports:
-            return body
+        detach_event_results = _get_detach_event_results()
+        if "Success" in detach_event_results:
+            return client.show_instance_action(
+                server_id, detach_request_id)['instanceAction']
 
         timed_out = int(time.time()) - start >= client.build_timeout
         if timed_out:
@@ -353,3 +524,62 @@
                        'the required time (%s s)' % (port_id, server_id,
                                                      client.build_timeout))
             raise lib_exc.TimeoutException(message)
+
+
+def wait_for_guest_os_boot(client, server_id):
+    start_time = int(time.time())
+    while True:
+        console_output = client.get_console_output(server_id)['output']
+        for line in console_output.split('\n'):
+            if 'login:' in line.lower():
+                return
+        if int(time.time()) - start_time >= client.build_timeout:
+            LOG.info("Guest OS on server %s probably isn't ready or its "
+                     "console log can't be parsed properly. If guest OS "
+                     "isn't ready, that may cause problems with SSH to "
+                     "the server.",
+                     server_id)
+            return
+        time.sleep(client.build_interval)
+
+
+def wait_for_server_floating_ip(servers_client, server, floating_ip,
+                                wait_for_disassociate=False):
+    """Wait for floating IP association or disassociation.
+
+    :param servers_client: The servers client to use when querying the server's
+    floating IPs.
+    :param server: The server JSON dict on which to wait.
+    :param floating_ip: The floating IP JSON dict on which to wait.
+    :param wait_for_disassociate: Boolean indicating whether to wait for
+    disassociation instead of association.
+    """
+
+    def _get_floating_ip_in_server_addresses(floating_ip, server):
+        for addresses in server['addresses'].values():
+            for address in addresses:
+                if (
+                    address['OS-EXT-IPS:type'] == 'floating' and
+                    address['addr'] == floating_ip['floating_ip_address']
+                ):
+                    return address
+        return None
+
+    start_time = int(time.time())
+    while True:
+        server = servers_client.show_server(server['id'])['server']
+        address = _get_floating_ip_in_server_addresses(floating_ip, server)
+        if address is None and wait_for_disassociate:
+            return None
+        if not wait_for_disassociate and address:
+            return address
+
+        if int(time.time()) - start_time >= servers_client.build_timeout:
+            if wait_for_disassociate:
+                msg = ('Floating ip %s failed to disassociate from server %s '
+                       'in time.' % (floating_ip, server['id']))
+            else:
+                msg = ('Floating ip %s failed to associate with server %s '
+                       'in time.' % (floating_ip, server['id']))
+            raise lib_exc.TimeoutException(msg)
+        time.sleep(servers_client.build_interval)
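A hedged sketch of how a test might drive the new volume attachment waiters
added above; the client objects and the server and volume dicts are
placeholders, while the waiter signatures are taken from the code::

    from tempest.common import waiters

    def attach_then_detach(servers_client, volumes_client, server, volume):
        # After issuing an attach through the compute API, wait until the
        # attachment shows up on the volume side.
        attachment = waiters.wait_for_volume_attachment_create(
            volumes_client, volume['id'], server['id'])
        # ... exercise the attached volume here ...
        # After issuing a detach, wait until the compute API stops
        # reporting the volume on the server.
        waiters.wait_for_volume_attachment_remove_from_server(
            servers_client, server['id'], volume['id'])
        return attachment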
diff --git a/tempest/config.py b/tempest/config.py
index 5a2d722..662a249 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from __future__ import print_function
-
 import os
 import tempfile
 
@@ -94,7 +92,24 @@
     cfg.StrOpt('admin_domain_name',
                default='Default',
                help="Admin domain name for authentication (Keystone V3). "
-                    "The same domain applies to user and project"),
+                    "The same domain applies to user and project if "
+                    "admin_user_domain_name and admin_project_domain_name "
+                    "are not specified"),
+    cfg.StrOpt('admin_user_domain_name',
+               help="Domain name that contains the admin user (Keystone V3). "
+                    "May be different from admin_project_domain_name and "
+                    "admin_domain_name"),
+    cfg.StrOpt('admin_project_domain_name',
+               help="Domain name that contains the project given by "
+                    "admin_project_name (Keystone V3). May be different from "
+                    "admin_user_domain_name and admin_domain_name"),
+    cfg.StrOpt('admin_system',
+               default=None,
+               help="The system scope on which an admin user has an admin "
+                    "role assignment, if any. Valid values are 'all' or None. "
+                    "This must be set to 'all' if using the "
+                    "[oslo_policy]/enforce_scope=true option for the "
+                    "identity service."),
 ]
 
 identity_group = cfg.OptGroup(name='identity',
@@ -244,14 +259,23 @@
                 help='Does the environment have the security compliance '
                      'settings enabled?'),
     cfg.BoolOpt('project_tags',
-                default=False,
-                help='Is the project tags identity v3 API available?'),
-    # Application credentials is a default feature in Queens. This config
-    # option can removed once Pike is EOL.
+                default=True,
+                help='Is the project tags identity v3 API available?',
+                deprecated_for_removal=True,
+                deprecated_reason='Project tags API is a default feature '
+                                  'since Queens'),
     cfg.BoolOpt('application_credentials',
-                default=False,
+                default=True,
                 help='Does the environment have application credentials '
-                     'enabled?'),
+                     'enabled?',
+                deprecated_for_removal=True,
+                deprecated_reason='Application credentials is a default '
+                                  'feature since Queens'),
+    # Access rules for application credentials are a default feature in Train.
+    # This config option can be removed once Stein is EOL.
+    cfg.BoolOpt('access_rules',
+                default=False,
+                help='Does the environment have access rules enabled?'),
     cfg.BoolOpt('immutable_user_source',
                 default=False,
                 help='Set to True if the environment has a read-only '
@@ -417,6 +441,15 @@
     cfg.BoolOpt('disk_config',
                 default=True,
                 help="If false, skip disk config tests"),
+    # TODO(pkesav): Make it True by default once Wallaby
+    # is the oldest supported stable branch for Tempest.
+    cfg.BoolOpt('hostname_fqdn_sanitization',
+                default=False,
+                help="If false, skip fqdn instance sanitization tests. "
+                     "Nova started sanitizing the instance name by replacing "
+                     "the '.' with '-' to comply with fqdn hostname. Nova "
+                     "changed that in Wallaby cycle, if your cloud is older "
+                     "than wallaby then you can keep/make it False."),
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help='A list of enabled compute extensions with a special '
@@ -449,6 +482,10 @@
     cfg.BoolOpt('shelve',
                 default=True,
                 help="Does the test environment support shelving/unshelving?"),
+    cfg.BoolOpt('shelve_migrate',
+                default=False,
+                help="Does the test environment support "
+                     "cold migration of unshelved server?"),
     cfg.BoolOpt('suspend',
                 default=True,
                 help="Does the test environment support suspend/resume?"),
@@ -483,6 +520,12 @@
                                   'MIN_LIBVIRT_VERSION is >= 1.2.17 on all '
                                   'branches from stable/rocky and will be '
                                   'removed in a future release.'),
+    cfg.BoolOpt('can_migrate_between_any_hosts',
+                default=True,
+                help="Does the test environment support migrating between "
+                     "any hosts? In environments with non-homogeneous compute "
+                     "nodes you can set this to False so that it will select "
+                     "destination host for migrating automatically"),
     cfg.BoolOpt('vnc_console',
                 default=False,
                 help='Enable VNC console. This configuration value should '
@@ -521,6 +564,10 @@
                 default=True,
                 help='Does the test environment support instance rescue '
                      'mode?'),
+    cfg.BoolOpt('stable_rescue',
+                default=False,
+                help='Does the test environment support stable device '
+                     'instance rescue mode?'),
     cfg.BoolOpt('enable_instance_password',
                 default=True,
                 help='Enables returning of the instance password by the '
@@ -590,6 +637,22 @@
                 help='Does the test environment support attaching a volume to '
                      'more than one instance? This depends on hypervisor and '
                      'volume backend/type and compute API version 2.60.'),
+    cfg.BoolOpt('xenapi_apis',
+                default=False,
+                help='Does the test environment support the XenAPI-specific '
+                     'APIs: os-agents, writeable server metadata and the '
+                     'resetNetwork server action? '
+                     'These were removed in Victoria alongside the XenAPI '
+                     'virt driver.',
+                deprecated_for_removal=True,
+                deprecated_reason="On Nova side, XenAPI virt driver and the "
+                                  "APIs that only worked with that driver "
+                                  "have been removed and there's nothing to "
+                                  "test after Ussuri."),
+    cfg.BoolOpt('ide_bus',
+                default=True,
+                help='Does the test environment support attaching devices '
+                     'using an IDE bus to the instance?'),
 ]
 
 
@@ -611,6 +674,15 @@
                choices=['public', 'admin', 'internal',
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the image service."),
+    cfg.StrOpt('alternate_image_endpoint',
+               default=None,
+               help="Alternate endpoint name for cross-worker testing"),
+    cfg.StrOpt('alternate_image_endpoint_type',
+               default='publicURL',
+               choices=['public', 'admin', 'internal',
+                        'publicURL', 'adminURL', 'internalURL'],
+               help=("The endpoint type to use for the alternate image "
+                     "service.")),
     cfg.StrOpt('http_image',
                default='http://download.cirros-cloud.net/0.3.1/'
                'cirros-0.3.1-x86_64-uec.tar.gz',
@@ -654,6 +726,19 @@
                                   'are current one. In future, Tempest will '
                                   'test v2 APIs only so this config option '
                                   'will be removed.'),
+    # The image import feature is set up in devstack from Victoria onwards.
+    # Once all stable branches set it up the same way, via glance standalone
+    # mode or with uwsgi, we can remove this config option.
+    cfg.BoolOpt('import_image',
+                default=False,
+                help="Is the image import feature enabled?"),
+    # NOTE(danms): Starting mid-Wallaby glance began enforcing the
+    # previously-informal requirement that os_glance_* properties are
+    # reserved for internal use. Thus, we can only run these checks
+    # if we know we are on a new enough glance.
+    cfg.BoolOpt('os_glance_reserved',
+                default=False,
+                help="Should we check that os_glance namespace is reserved"),
 ]
 
 network_group = cfg.OptGroup(name='network',
@@ -699,6 +784,11 @@
     cfg.StrOpt('floating_network_name',
                help="Default floating network name. Used to allocate floating "
                     "IPs when neutron is enabled."),
+    cfg.StrOpt('subnet_id',
+               default="",
+               help="Subnet id of subnet which is used for allocation of "
+                    "floating IPs. Specify when two or more subnets are "
+                    "present in network."),
     cfg.StrOpt('public_router_id',
                default="",
                help="Id of the public router that provides external "
@@ -721,11 +811,13 @@
                 deprecated_reason="This config option is no longer "
                                   "used anywhere, so it can be removed."),
     cfg.StrOpt('port_vnic_type',
-               choices=[None, 'normal', 'direct', 'macvtap'],
+               choices=[None, 'normal', 'direct', 'macvtap', 'direct-physical',
+                        'baremetal', 'virtio-forwarder'],
                help="vnic_type to use when launching instances"
                     " with pre-configured ports."
                     " Supported ports are:"
-                    " ['normal','direct','macvtap']"),
+                    " ['normal', 'direct', 'macvtap', 'direct-physical', "
+                    "'baremetal', 'virtio-forwarder']"),
     cfg.Opt('port_profile',
             type=ProfileType,
             default={},
@@ -747,29 +839,37 @@
 NetworkFeaturesGroup = [
     cfg.BoolOpt('ipv6',
                 default=True,
-                help="Allow the execution of IPv6 tests"),
+                help="Allow the execution of IPv6 tests."),
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help="A list of enabled network extensions with a special "
                      "entry all which indicates every extension is enabled. "
                      "Empty list indicates all extensions are disabled. "
-                     "To get the list of extensions run: 'neutron ext-list'"),
+                     "To get the list of extensions run: "
+                     "'openstack extension list --network'"),
+    cfg.ListOpt('available_features',
+                default=['all'],
+                help="A list of available network features with a special "
+                     "entry all that indicates every feature is available. "
+                     "Empty list indicates all features are disabled. "
+                     "This list can contain features that are not "
+                     "discoverable through the API."),
     cfg.BoolOpt('ipv6_subnet_attributes',
                 default=False,
                 help="Allow the execution of IPv6 subnet tests that use "
                      "the extended IPv6 attributes ipv6_ra_mode "
-                     "and ipv6_address_mode"
+                     "and ipv6_address_mode."
                 ),
     cfg.BoolOpt('port_admin_state_change',
                 default=True,
-                help="Does the test environment support changing"
-                     " port admin state"),
+                help="Does the test environment support changing "
+                     "port admin state?"),
     cfg.BoolOpt('port_security',
                 default=False,
                 help="Does the test environment support port security?"),
     cfg.BoolOpt('floating_ips',
                 default=True,
-                help='Does the test environment support floating_ips'),
+                help='Does the test environment support floating_ips?'),
     cfg.StrOpt('qos_placement_physnet', default=None,
                help='Name of the physnet for placement based minimum '
                     'bandwidth allocation.'),
@@ -778,6 +878,18 @@
                     'This value will be increased in case of conflict.')
 ]
 
+dashboard_group = cfg.OptGroup(name="dashboard",
+                               title="Dashboard options")
+
+DashboardGroup = [
+    cfg.StrOpt('dashboard_url',
+               default='http://localhost/',
+               help="Where the dashboard can be found"),
+    cfg.BoolOpt('disable_ssl_certificate_validation',
+                default=False,
+                help="Set to True if using self-signed SSL certificates."),
+]
+
 validation_group = cfg.OptGroup(name='validation',
                                 title='SSH Validation options')
 
@@ -822,9 +934,17 @@
     cfg.StrOpt('image_ssh_user',
                default="root",
                help="User name used to authenticate to an instance."),
+    cfg.StrOpt('image_alt_ssh_user',
+               default="root",
+               help="User name used to authenticate to an alt instance."),
     cfg.StrOpt('image_ssh_password',
                default="password",
-               help="Password used to authenticate to an instance."),
+               help="Password used to authenticate to an instance.",
+               secret=True),
+    cfg.StrOpt('image_alt_ssh_password',
+               default="password",
+               help="Password used to authenticate to an alt instance.",
+               secret=True),
     cfg.StrOpt('ssh_shell_prologue',
                default="set -eu -o pipefail; PATH=$$PATH:/sbin:/usr/sbin;",
                help="Shell fragments to use before executing a command "
@@ -878,6 +998,9 @@
                 default=['BACKEND_1', 'BACKEND_2'],
                 help='A list of backend names separated by comma. '
                      'The backend name must be declared in cinder.conf'),
+    cfg.StrOpt('volume_type',
+               default='',
+               help='Volume type to be used while creating a volume.'),
     cfg.StrOpt('storage_protocol',
                default='iSCSI',
                help='Backend protocol to target when creating volume types'),
@@ -890,6 +1013,11 @@
     cfg.IntOpt('volume_size',
                default=1,
                help='Default size in GB for volumes created by volumes tests'),
+    cfg.IntOpt('volume_size_extend',
+               default=1,
+               help="Size in GB a volume is extended by - if a test "
+                    "extends a volume, the size of the new volume will be "
+                    "volume_size + volume_size_extend."),
     cfg.ListOpt('manage_volume_ref',
                 default=['source-name', 'volume-%s'],
                 help="A reference to existing volume for volume manage. "
@@ -949,38 +1077,21 @@
                 help='A list of enabled volume extensions with a special '
                      'entry all which indicates every extension is enabled. '
                      'Empty list indicates all extensions are disabled'),
-    cfg.BoolOpt('api_v2',
-                default=True,
-                help="Is the v2 volume API enabled",
-                deprecated_for_removal=True,
-                deprecated_reason="The v2 volume API has been deprecated "
-                                  "since Pike release. Now Tempest run all "
-                                  "the volume tests against v2 or v3 API "
-                                  "based on CONF.volume.catalog_type which "
-                                  "makes this config option unusable. If "
-                                  "catalog_type is volumev2, then all the "
-                                  "volume tests will run against v2 API. "
-                                  "Use ``CONF.volume.catalog_type`` to run "
-                                  "the Tempest against volume v2 or v3 API"),
-    cfg.BoolOpt('api_v3',
-                default=True,
-                help="Is the v3 volume API enabled",
-                deprecated_for_removal=True,
-                deprecated_reason="Tempest run all the volume tests against "
-                                  "v2 or v3 API based on "
-                                  "CONF.volume.catalog_type which makes this "
-                                  "config option unusable. If catalog_type is "
-                                  "volumev3 which is default, then all the "
-                                  "volume tests will run against v3 API. "
-                                  "Use ``CONF.volume.catalog_type`` to run "
-                                  "the Tempest against volume v2 or v3 API"),
     cfg.BoolOpt('extend_attached_volume',
                 default=False,
                 help='Does the cloud support extending the size of a volume '
                      'which is currently attached to a server instance? This '
                      'depends on the 3.42 volume API microversion and the '
                      '2.51 compute API microversion. Also, not all volume or '
-                     'compute backends support this operation.')
+                     'compute backends support this operation.'),
+    cfg.BoolOpt('extend_attached_encrypted_volume',
+                default=False,
+                help='Does the cloud support extending the size of an '
+                     'encrypted volume which is currently attached to a '
+                     'server instance? This depends on the 3.42 volume API '
+                     'microversion and the 2.51 compute API microversion. '
+                     'Also, not all volume or compute backends support this '
+                     'operation.')
 ]
 
 
@@ -1011,7 +1122,7 @@
                help="Number of seconds to wait while looping to check the "
                     "status of a container to container synchronization"),
     cfg.StrOpt('operator_role',
-               default='Member',
+               default='member',
                help="Role to add to users created for swift tests to "
                     "enable creating containers"),
     cfg.StrOpt('reseller_admin_role',
@@ -1055,14 +1166,10 @@
 scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
 
 ScenarioGroup = [
-    cfg.StrOpt('img_dir',
-               default='/opt/stack/new/devstack/files/images/'
-               'cirros-0.3.1-x86_64-uec',
-               help='Directory containing image files',
-               deprecated_for_removal=True),
     cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
-               default='cirros-0.3.1-x86_64-disk.img',
-               help='Image file name'),
+               default='/opt/stack/new/devstack/files/images'
+               '/cirros-0.3.1-x86_64-disk.img',
+               help='Full path of the image file.'),
     cfg.StrOpt('img_disk_format',
                default='qcow2',
                help='Image disk format'),
@@ -1071,18 +1178,6 @@
                help='Image container format'),
     cfg.DictOpt('img_properties', help='Glance image properties. '
                 'Use for custom images which require them'),
-    cfg.StrOpt('ami_img_file',
-               default='cirros-0.3.1-x86_64-blank.img',
-               help='AMI image file name',
-               deprecated_for_removal=True),
-    cfg.StrOpt('ari_img_file',
-               default='cirros-0.3.1-x86_64-initrd',
-               help='ARI image file name',
-               deprecated_for_removal=True),
-    cfg.StrOpt('aki_img_file',
-               default='cirros-0.3.1-x86_64-vmlinuz',
-               help='AKI image file name',
-               deprecated_for_removal=True),
     # TODO(yfried): add support for dhcpcd
     cfg.StrOpt('dhcp_client',
                default='udhcpc',
@@ -1117,6 +1212,42 @@
     cfg.BoolOpt('nova',
                 default=True,
                 help="Whether or not nova is expected to be available"),
+    cfg.BoolOpt('horizon',
+                default=True,
+                help="Whether or not horizon is expected to be available"),
+]
+
+enforce_scope_group = cfg.OptGroup(name="enforce_scope",
+                                   title="OpenStack Services with "
+                                         "enforce scope")
+
+
+EnforceScopeGroup = [
+    cfg.BoolOpt('nova',
+                default=False,
+                help='Do the compute service API policies enforce scope? '
+                     'This configuration value should be the same as '
+                     'nova.conf: [oslo_policy].enforce_scope option.'),
+    cfg.BoolOpt('neutron',
+                default=False,
+                help='Do the network service API policies enforce scope? '
+                     'This configuration value should be the same as '
+                     'neutron.conf: [oslo_policy].enforce_scope option.'),
+    cfg.BoolOpt('glance',
+                default=False,
+                help='Do the Image service API policies enforce scope? '
+                     'This configuration value should be the same as '
+                     'glance.conf: [oslo_policy].enforce_scope option.'),
+    cfg.BoolOpt('cinder',
+                default=False,
+                help='Do the Volume service API policies enforce scope? '
+                     'This configuration value should be the same as '
+                     'cinder.conf: [oslo_policy].enforce_scope option.'),
+    cfg.BoolOpt('keystone',
+                default=False,
+                help='Do the Identity service API policies enforce scope? '
+                     'This configuration value should be the same as '
+                     'keystone.conf: [oslo_policy].enforce_scope option.'),
 ]
 
 debug_group = cfg.OptGroup(name="debug",
@@ -1164,7 +1295,7 @@
 
 The best use case is investigating used resources of one test.
 A test can be run as follows:
- $ ostestr --pdb TEST_ID
+ $ stestr run --pdb TEST_ID
 or
  $ python -m testtools.run TEST_ID"""),
 ]
@@ -1180,6 +1311,7 @@
     (image_feature_group, ImageFeaturesGroup),
     (network_group, NetworkGroup),
     (network_feature_group, NetworkFeaturesGroup),
+    (dashboard_group, DashboardGroup),
     (validation_group, ValidationGroup),
     (volume_group, VolumeGroup),
     (volume_feature_group, VolumeFeaturesGroup),
@@ -1187,6 +1319,7 @@
     (object_storage_feature_group, ObjectStoreFeaturesGroup),
     (scenario_group, ScenarioGroup),
     (service_available_group, ServiceAvailableGroup),
+    (enforce_scope_group, EnforceScopeGroup),
     (debug_group, DebugGroup),
     (placement_group, PlacementGroup),
     (profiler_group, ProfilerGroup),
@@ -1247,6 +1380,7 @@
         self.image_feature_enabled = _CONF['image-feature-enabled']
         self.network = _CONF.network
         self.network_feature_enabled = _CONF['network-feature-enabled']
+        self.dashboard = _CONF.dashboard
         self.validation = _CONF.validation
         self.volume = _CONF.volume
         self.volume_feature_enabled = _CONF['volume-feature-enabled']
@@ -1255,6 +1389,7 @@
             'object-storage-feature-enabled']
         self.scenario = _CONF.scenario
         self.service_available = _CONF.service_available
+        self.enforce_scope = _CONF.enforce_scope
         self.debug = _CONF.debug
         logging.tempest_set_log_file('tempest.log')
         # Setting attributes for plugins
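
For orientation, the new option groups registered above become readable through tempest's CONF object via the self.dashboard and self.enforce_scope assignments shown in this hunk. A hedged sketch of how test code could read them; only the option paths come from the registrations above, the variable names are illustrative, and a loaded tempest.conf is assumed:

    from tempest import config

    CONF = config.CONF

    # [enforce_scope] flags mirror each service's [oslo_policy]/enforce_scope.
    compute_scope_enforced = CONF.enforce_scope.nova
    identity_scope_enforced = CONF.enforce_scope.keystone
    # Per the help text above, enforcing scope in keystone also implies that
    # admin_system must be set to 'all' for the admin credentials.

    # [dashboard] options feed horizon-facing checks.
    horizon_url = CONF.dashboard.dashboard_url
    verify_certs = not CONF.dashboard.disable_ssl_certificate_validation
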
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 2c40cb1..c1e6b2d 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -15,6 +15,7 @@
 import os
 import re
 
+from hacking import core
 import pycodestyle
 
 
@@ -25,7 +26,6 @@
 TEST_DEFINITION = re.compile(r'^\s*def test.*')
 SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
 SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
-VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
 RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
 mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
 TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
@@ -39,6 +39,7 @@
 _HAVE_NEGATIVE_DECORATOR = False
 
 
+@core.flake8ext
 def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
     """Check for client imports from tempest/api & tempest/scenario tests
 
@@ -53,6 +54,7 @@
                      " in tempest/api/* or tempest/scenario/* tests"))
 
 
+@core.flake8ext
 def scenario_tests_need_service_tags(physical_line, filename,
                                      previous_logical):
     """Check that scenario tests have service tags
@@ -67,6 +69,7 @@
                         "T104: Scenario tests require a service decorator")
 
 
+@core.flake8ext
 def no_setup_teardown_class_for_tests(physical_line, filename):
 
     if pycodestyle.noqa(physical_line):
@@ -80,20 +83,7 @@
                 "T105: (setUp|tearDown)Class can not be used in tests")
 
 
-def no_vi_headers(physical_line, line_number, lines):
-    """Check for vi editor configuration in source files.
-
-    By default vi modelines can only appear in the first or
-    last 5 lines of a source file.
-
-    T106
-    """
-    # NOTE(gilliard): line_number is 1-indexed
-    if line_number <= 5 or line_number > len(lines) - 5:
-        if VI_HEADER_RE.match(physical_line):
-            return 0, "T106: Don't put vi configuration in source files"
-
-
+@core.flake8ext
 def service_tags_not_in_module_path(physical_line, filename):
     """Check that a service tag isn't in the module path
 
@@ -117,6 +107,7 @@
                             "T107: service tag should not be in path")
 
 
+@core.flake8ext
 def no_hyphen_at_end_of_rand_name(logical_line, filename):
     """Check no hyphen at the end of rand_name() argument
 
@@ -127,6 +118,7 @@
         return 0, msg
 
 
+@core.flake8ext
 def no_mutable_default_args(logical_line):
     """Check that mutable object isn't used as default argument
 
@@ -137,6 +129,7 @@
         yield (0, msg)
 
 
+@core.flake8ext
 def no_testtools_skip_decorator(logical_line):
     """Check that methods do not have the testtools.skip decorator
 
@@ -147,20 +140,10 @@
                "decorators.skip_because from tempest.lib")
 
 
-def _common_service_clients_check(logical_line, physical_line, filename,
-                                  ignored_list_file=None):
+def _common_service_clients_check(logical_line, physical_line, filename):
     if not re.match('tempest/(lib/)?services/.*', filename):
         return False
 
-    if ignored_list_file is not None:
-        ignored_list = []
-        with open('tempest/hacking/' + ignored_list_file) as f:
-            for line in f:
-                ignored_list.append(line.strip())
-
-        if filename in ignored_list:
-            return False
-
     if not METHOD.match(physical_line):
         return False
 
@@ -170,14 +153,15 @@
     return True
 
 
-def get_resources_on_service_clients(logical_line, physical_line, filename,
+@core.flake8ext
+def get_resources_on_service_clients(physical_line, logical_line, filename,
                                      line_number, lines):
     """Check that service client names of GET should be consistent
 
     T110
     """
     if not _common_service_clients_check(logical_line, physical_line,
-                                         filename, 'ignored_list_T110.txt'):
+                                         filename):
         return
 
     for line in lines[line_number:]:
@@ -197,14 +181,15 @@
         yield (0, msg)
 
 
-def delete_resources_on_service_clients(logical_line, physical_line, filename,
+@core.flake8ext
+def delete_resources_on_service_clients(physical_line, logical_line, filename,
                                         line_number, lines):
     """Check that service client names of DELETE should be consistent
 
     T111
     """
     if not _common_service_clients_check(logical_line, physical_line,
-                                         filename, 'ignored_list_T111.txt'):
+                                         filename):
         return
 
     for line in lines[line_number:]:
@@ -223,6 +208,7 @@
         yield (0, msg)
 
 
+@core.flake8ext
 def dont_import_local_tempest_into_lib(logical_line, filename):
     """Check that tempest.lib should not import local tempest code
 
@@ -244,6 +230,7 @@
     yield (0, msg)
 
 
+@core.flake8ext
 def use_rand_uuid_instead_of_uuid4(logical_line, filename):
     """Check that tests use data_utils.rand_uuid() instead of uuid.uuid4()
 
@@ -260,6 +247,7 @@
     yield (0, msg)
 
 
+@core.flake8ext
 def dont_use_config_in_tempest_lib(logical_line, filename):
     """Check that tempest.lib doesn't use tempest config
 
@@ -277,7 +265,8 @@
         yield(0, msg)
 
 
-def dont_put_admin_tests_on_nonadmin_path(logical_line, physical_line,
+@core.flake8ext
+def dont_put_admin_tests_on_nonadmin_path(logical_line,
                                           filename):
     """Check admin tests should exist under admin path
 
@@ -287,9 +276,6 @@
     if 'tempest/api/' not in filename:
         return
 
-    if pycodestyle.noqa(physical_line):
-        return
-
     if not re.match(r'class .*Test.*\(.*Admin.*\):', logical_line):
         return
 
@@ -298,6 +284,7 @@
         yield(0, msg)
 
 
+@core.flake8ext
 def unsupported_exception_attribute_PY3(logical_line):
     """Check Unsupported 'message' exception attribute in PY3
 
@@ -309,6 +296,7 @@
         yield(0, msg)
 
 
+@core.flake8ext
 def negative_test_attribute_always_applied_to_negative_tests(physical_line,
                                                              filename):
     """Check ``@decorators.attr(type=['negative'])`` applied to negative tests.
@@ -330,22 +318,3 @@
                        " to all negative API tests"
                 )
             _HAVE_NEGATIVE_DECORATOR = False
-
-
-def factory(register):
-    register(import_no_clients_in_api_and_scenario_tests)
-    register(scenario_tests_need_service_tags)
-    register(no_setup_teardown_class_for_tests)
-    register(no_vi_headers)
-    register(service_tags_not_in_module_path)
-    register(no_hyphen_at_end_of_rand_name)
-    register(no_mutable_default_args)
-    register(no_testtools_skip_decorator)
-    register(get_resources_on_service_clients)
-    register(delete_resources_on_service_clients)
-    register(dont_import_local_tempest_into_lib)
-    register(dont_use_config_in_tempest_lib)
-    register(use_rand_uuid_instead_of_uuid4)
-    register(dont_put_admin_tests_on_nonadmin_path)
-    register(unsupported_exception_attribute_PY3)
-    register(negative_test_attribute_always_applied_to_negative_tests)
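
With the factory() registration gone, each check above is exported through hacking's flake8 extension mechanism instead. A minimal sketch of a new check written in the same style; the name, T999 code and message are made up for illustration, and registering the local plugin in the project's flake8 configuration is assumed but not shown:

    import re

    from hacking import core

    PRINT_CALL_RE = re.compile(r"\bprint\(")


    @core.flake8ext
    def no_bare_print_calls(logical_line):
        """Check that code does not call print() directly.

        T999 (illustrative code only)
        """
        if PRINT_CALL_RE.search(logical_line):
            yield (0, "T999: use the logging module instead of print()")
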
diff --git a/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py b/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
index 28ed816..8aed37d 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/parameter_types.py
@@ -120,3 +120,10 @@
     # 7: SUSPENDED
     'enum': [0, 1, 3, 4, 6, 7]
 }
+
+uuid_or_null = {
+    'anyOf': [
+        {'type': 'string', 'format': 'uuid'},
+        {'type': 'null'}
+    ]
+}
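
A quick sanity check of the new uuid_or_null fragment with the jsonschema library; note that the 'uuid' format keyword is advisory unless an explicit format checker is supplied, so by default only the anyOf type alternatives are enforced:

    import jsonschema

    uuid_or_null = {
        'anyOf': [
            {'type': 'string', 'format': 'uuid'},
            {'type': 'null'}
        ]
    }

    # Both calls pass silently; a non-string, non-null value such as 42
    # would raise jsonschema.exceptions.ValidationError.
    jsonschema.validate('6ba7b810-9dad-11d1-80b4-00c04fd430c8', uuid_or_null)
    jsonschema.validate(None, uuid_or_null)
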
diff --git a/tempest/lib/api_schema/response/compute/v2_16/servers.py b/tempest/lib/api_schema/response/compute/v2_16/servers.py
index fc81ff7..dcd64cf 100644
--- a/tempest/lib/api_schema/response/compute/v2_16/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_16/servers.py
@@ -171,3 +171,4 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_19/servers.py b/tempest/lib/api_schema/response/compute/v2_19/servers.py
index b6c3c14..0e4bd5c 100644
--- a/tempest/lib/api_schema/response/compute/v2_19/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_19/servers.py
@@ -61,3 +61,4 @@
 attach_volume = copy.deepcopy(serversv216.attach_volume)
 show_volume_attachment = copy.deepcopy(serversv216.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(serversv216.list_volume_attachments)
+show_instance_action = copy.deepcopy(serversv216.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_26/servers.py b/tempest/lib/api_schema/response/compute/v2_26/servers.py
index 5a0f987..74c08f1 100644
--- a/tempest/lib/api_schema/response/compute/v2_26/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_26/servers.py
@@ -104,3 +104,4 @@
 attach_volume = copy.deepcopy(servers219.attach_volume)
 show_volume_attachment = copy.deepcopy(servers219.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers219.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers219.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_3/servers.py b/tempest/lib/api_schema/response/compute/v2_3/servers.py
index 1674c1b..435e3ac 100644
--- a/tempest/lib/api_schema/response/compute/v2_3/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_3/servers.py
@@ -176,3 +176,4 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_47/servers.py b/tempest/lib/api_schema/response/compute/v2_47/servers.py
index d580f2c..7050602 100644
--- a/tempest/lib/api_schema/response/compute/v2_47/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_47/servers.py
@@ -69,3 +69,4 @@
 attach_volume = copy.deepcopy(servers226.attach_volume)
 show_volume_attachment = copy.deepcopy(servers226.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers226.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers226.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_48/servers.py b/tempest/lib/api_schema/response/compute/v2_48/servers.py
index e2e45bc..af6344b 100644
--- a/tempest/lib/api_schema/response/compute/v2_48/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_48/servers.py
@@ -132,3 +132,4 @@
 attach_volume = copy.deepcopy(servers247.attach_volume)
 show_volume_attachment = copy.deepcopy(servers247.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers247.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers247.show_instance_action)
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/lib/api_schema/response/compute/v2_51/__init__.py
similarity index 100%
copy from tempest/tests/lib/services/volume/v1/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_51/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_51/servers.py b/tempest/lib/api_schema/response/compute/v2_51/servers.py
new file mode 100644
index 0000000..e603287
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_51/servers.py
@@ -0,0 +1,42 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_48 import servers as servers248
+
+# microversion 2.51 made events a mandatory field in the response
+show_instance_action = copy.deepcopy(servers248.show_instance_action)
+show_instance_action['response_body'][
+    'properties']['instanceAction']['required'].append('events')
+
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers248.list_servers)
+show_server_diagnostics = copy.deepcopy(servers248.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers248.get_remote_consoles)
+list_tags = copy.deepcopy(servers248.list_tags)
+update_all_tags = copy.deepcopy(servers248.update_all_tags)
+delete_all_tags = copy.deepcopy(servers248.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers248.check_tag_existence)
+update_tag = copy.deepcopy(servers248.update_tag)
+delete_tag = copy.deepcopy(servers248.delete_tag)
+get_server = copy.deepcopy(servers248.get_server)
+list_servers_detail = copy.deepcopy(servers248.list_servers_detail)
+update_server = copy.deepcopy(servers248.update_server)
+rebuild_server = copy.deepcopy(servers248.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers248.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers248.attach_volume)
+show_volume_attachment = copy.deepcopy(servers248.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers248.list_volume_attachments)
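
The block of copy.deepcopy re-exports above exists so that the compute service client can resolve every schema name from whichever microversion module matches the negotiated version. A rough, illustrative sketch of that selection idea; the version bounds are examples only, and the real lookup table and helper live in the compute service clients:

    from tempest.lib.api_schema.response.compute.v2_48 import servers as servers248
    from tempest.lib.api_schema.response.compute.v2_51 import servers as servers251

    # Example bounds only; 'max': None means "no upper bound yet".
    schema_versions_info = [
        {'min': '2.48', 'max': '2.50', 'schema': servers248},
        {'min': '2.51', 'max': None, 'schema': servers251},
    ]


    def _ver(version):
        # '2.51' -> (2, 51), so microversions compare correctly as tuples.
        return tuple(int(part) for part in version.split('.'))


    def pick_schema(requested):
        """Return the schema module whose [min, max] range covers requested."""
        for entry in schema_versions_info:
            low = _ver(entry['min'])
            high = _ver(entry['max']) if entry['max'] else (999, 999)
            if low <= _ver(requested) <= high:
                return entry['schema']


    # pick_schema('2.51').show_instance_action now requires 'events', while
    # unchanged names such as get_server resolve through the re-exports above.
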
diff --git a/tempest/lib/api_schema/response/compute/v2_54/servers.py b/tempest/lib/api_schema/response/compute/v2_54/servers.py
index 2c2bff0..135b381 100644
--- a/tempest/lib/api_schema/response/compute/v2_54/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_54/servers.py
@@ -12,7 +12,7 @@
 
 import copy
 
-from tempest.lib.api_schema.response.compute.v2_48 import servers as servers248
+from tempest.lib.api_schema.response.compute.v2_51 import servers as servers251
 # ****** Schemas changed in microversion 2.54 *****************
 
 # Note(gmann): This is schema for microversion 2.54 which includes the
@@ -26,14 +26,14 @@
     ]
 }
 
-rebuild_server = copy.deepcopy(servers248.rebuild_server)
+rebuild_server = copy.deepcopy(servers251.rebuild_server)
 rebuild_server['response_body']['properties']['server'][
     'properties'].update({'key_name': key_name})
 rebuild_server['response_body']['properties']['server'][
     'required'].append('key_name')
 
 rebuild_server_with_admin_pass = copy.deepcopy(
-    servers248.rebuild_server_with_admin_pass)
+    servers251.rebuild_server_with_admin_pass)
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
     'properties'].update({'key_name': key_name})
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
@@ -43,18 +43,19 @@
 # to keep this schema in this file to have the generic way to select the
 # right schema based on self.schema_versions_info mapping in service client.
 # ****** Schemas unchanged in microversion 2.54 since microversion 2.48 ***
-get_server = copy.deepcopy(servers248.get_server)
-list_servers_detail = copy.deepcopy(servers248.list_servers_detail)
-update_server = copy.deepcopy(servers248.update_server)
-list_servers = copy.deepcopy(servers248.list_servers)
-show_server_diagnostics = copy.deepcopy(servers248.show_server_diagnostics)
-get_remote_consoles = copy.deepcopy(servers248.get_remote_consoles)
-list_tags = copy.deepcopy(servers248.list_tags)
-update_all_tags = copy.deepcopy(servers248.update_all_tags)
-delete_all_tags = copy.deepcopy(servers248.delete_all_tags)
-check_tag_existence = copy.deepcopy(servers248.check_tag_existence)
-update_tag = copy.deepcopy(servers248.update_tag)
-delete_tag = copy.deepcopy(servers248.delete_tag)
-attach_volume = copy.deepcopy(servers248.attach_volume)
-show_volume_attachment = copy.deepcopy(servers248.show_volume_attachment)
-list_volume_attachments = copy.deepcopy(servers248.list_volume_attachments)
+get_server = copy.deepcopy(servers251.get_server)
+list_servers_detail = copy.deepcopy(servers251.list_servers_detail)
+update_server = copy.deepcopy(servers251.update_server)
+list_servers = copy.deepcopy(servers251.list_servers)
+show_server_diagnostics = copy.deepcopy(servers251.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers251.get_remote_consoles)
+list_tags = copy.deepcopy(servers251.list_tags)
+update_all_tags = copy.deepcopy(servers251.update_all_tags)
+delete_all_tags = copy.deepcopy(servers251.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers251.check_tag_existence)
+update_tag = copy.deepcopy(servers251.update_tag)
+delete_tag = copy.deepcopy(servers251.delete_tag)
+attach_volume = copy.deepcopy(servers251.attach_volume)
+show_volume_attachment = copy.deepcopy(servers251.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers251.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers251.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_57/servers.py b/tempest/lib/api_schema/response/compute/v2_57/servers.py
index aa57d25..bdff74b 100644
--- a/tempest/lib/api_schema/response/compute/v2_57/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_57/servers.py
@@ -62,3 +62,4 @@
 attach_volume = copy.deepcopy(servers254.attach_volume)
 show_volume_attachment = copy.deepcopy(servers254.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers254.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers254.show_instance_action)
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/lib/api_schema/response/compute/v2_58/__init__.py
similarity index 100%
copy from tempest/tests/lib/services/volume/v1/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_58/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_58/servers.py b/tempest/lib/api_schema/response/compute/v2_58/servers.py
new file mode 100644
index 0000000..62239cf
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_58/servers.py
@@ -0,0 +1,44 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+
+# microversion 2.58 added updated_at to the response
+show_instance_action = copy.deepcopy(servers257.show_instance_action)
+show_instance_action['response_body']['properties']['instanceAction'][
+    'properties']['updated_at'] = parameter_types.date_time
+show_instance_action['response_body']['properties']['instanceAction'][
+    'required'].append('updated_at')
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers257.list_servers)
+show_server_diagnostics = copy.deepcopy(servers257.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers257.get_remote_consoles)
+list_tags = copy.deepcopy(servers257.list_tags)
+update_all_tags = copy.deepcopy(servers257.update_all_tags)
+delete_all_tags = copy.deepcopy(servers257.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers257.check_tag_existence)
+update_tag = copy.deepcopy(servers257.update_tag)
+delete_tag = copy.deepcopy(servers257.delete_tag)
+get_server = copy.deepcopy(servers257.get_server)
+list_servers_detail = copy.deepcopy(servers257.list_servers_detail)
+update_server = copy.deepcopy(servers257.update_server)
+rebuild_server = copy.deepcopy(servers257.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers257.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers257.attach_volume)
+show_volume_attachment = copy.deepcopy(servers257.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers257.list_volume_attachments)
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/lib/api_schema/response/compute/v2_59/__init__.py
similarity index 100%
copy from tempest/tests/lib/services/volume/v1/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_59/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_59/migrations.py b/tempest/lib/api_schema/response/compute/v2_59/migrations.py
new file mode 100644
index 0000000..a37c0f1
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_59/migrations.py
@@ -0,0 +1,36 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_23 import migrations
+
+###########################################################################
+#
+# 2.59:
+#
+# The uuid value is now returned in the response body in addition to the
+# migration id for the following API responses:
+#
+# - GET /os-migrations
+# - GET /servers/{server_id}/migrations/{migration_id}
+# - GET /servers/{server_id}/migrations
+#
+###########################################################################
+
+uuid = {'type': 'string', 'format': 'uuid'}
+
+list_migrations = copy.deepcopy(migrations.list_migrations)
+list_migrations['response_body']['properties']['migrations']['items'][
+    'properties'].update({'uuid': uuid})
+list_migrations['response_body']['properties']['migrations']['items'][
+    'required'].append('uuid')
diff --git a/tempest/lib/api_schema/response/compute/v2_6/servers.py b/tempest/lib/api_schema/response/compute/v2_6/servers.py
index 922bf79..6103b7c 100644
--- a/tempest/lib/api_schema/response/compute/v2_6/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_6/servers.py
@@ -31,6 +31,7 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
 
 # NOTE: The consolidated remote console API got introduced with v2.6
 # with bp/consolidate-console-api. See Nova commit 578bafeda
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/lib/api_schema/response/compute/v2_62/__init__.py
similarity index 100%
copy from tempest/tests/lib/services/volume/v1/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_62/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_62/servers.py b/tempest/lib/api_schema/response/compute/v2_62/servers.py
new file mode 100644
index 0000000..23eebbb
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_62/servers.py
@@ -0,0 +1,47 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_58 import servers as servers258
+
+# microversion 2.62 added hostId and host to the event, but only hostId is
+# mandatory
+show_instance_action = copy.deepcopy(servers258.show_instance_action)
+show_instance_action['response_body']['properties']['instanceAction'][
+    'properties']['events']['items'][
+    'properties']['hostId'] = {'type': 'string'}
+show_instance_action['response_body']['properties']['instanceAction'][
+    'properties']['events']['items']['properties']['host'] = {'type': 'string'}
+show_instance_action['response_body']['properties']['instanceAction'][
+    'properties']['events']['items']['required'].append('hostId')
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers258.list_servers)
+show_server_diagnostics = copy.deepcopy(servers258.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers258.get_remote_consoles)
+list_tags = copy.deepcopy(servers258.list_tags)
+update_all_tags = copy.deepcopy(servers258.update_all_tags)
+delete_all_tags = copy.deepcopy(servers258.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers258.check_tag_existence)
+update_tag = copy.deepcopy(servers258.update_tag)
+delete_tag = copy.deepcopy(servers258.delete_tag)
+get_server = copy.deepcopy(servers258.get_server)
+list_servers_detail = copy.deepcopy(servers258.list_servers_detail)
+update_server = copy.deepcopy(servers258.update_server)
+rebuild_server = copy.deepcopy(servers258.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers258.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers258.attach_volume)
+show_volume_attachment = copy.deepcopy(servers258.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers258.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_63/servers.py b/tempest/lib/api_schema/response/compute/v2_63/servers.py
index 01910aa..db713b1 100644
--- a/tempest/lib/api_schema/response/compute/v2_63/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_63/servers.py
@@ -12,7 +12,7 @@
 
 import copy
 
-from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+from tempest.lib.api_schema.response.compute.v2_62 import servers as servers262
 
 # Nova microversion 2.63 adds 'trusted_image_certificates' (a list of
 # certificate IDs) to the server rebuild and servers details responses.
@@ -29,32 +29,32 @@
     }
 }
 
-list_servers_detail = copy.deepcopy(servers257.list_servers_detail)
+list_servers_detail = copy.deepcopy(servers262.list_servers_detail)
 list_servers_detail['response_body']['properties']['servers']['items'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 list_servers_detail['response_body']['properties']['servers']['items'][
     'required'].append('trusted_image_certificates')
 
-rebuild_server = copy.deepcopy(servers257.rebuild_server)
+rebuild_server = copy.deepcopy(servers262.rebuild_server)
 rebuild_server['response_body']['properties']['server'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 rebuild_server['response_body']['properties']['server'][
     'required'].append('trusted_image_certificates')
 
 rebuild_server_with_admin_pass = copy.deepcopy(
-    servers257.rebuild_server_with_admin_pass)
+    servers262.rebuild_server_with_admin_pass)
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 rebuild_server_with_admin_pass['response_body']['properties']['server'][
     'required'].append('trusted_image_certificates')
 
-update_server = copy.deepcopy(servers257.update_server)
+update_server = copy.deepcopy(servers262.update_server)
 update_server['response_body']['properties']['server'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 update_server['response_body']['properties']['server'][
     'required'].append('trusted_image_certificates')
 
-get_server = copy.deepcopy(servers257.get_server)
+get_server = copy.deepcopy(servers262.get_server)
 get_server['response_body']['properties']['server'][
     'properties'].update({'trusted_image_certificates': trusted_certs})
 get_server['response_body']['properties']['server'][
@@ -64,15 +64,16 @@
 # to keep this schema in this file to have the generic way to select the
 # right schema based on self.schema_versions_info mapping in service client.
 # ****** Schemas unchanged since microversion 2.57 ***
-list_servers = copy.deepcopy(servers257.list_servers)
-show_server_diagnostics = copy.deepcopy(servers257.show_server_diagnostics)
-get_remote_consoles = copy.deepcopy(servers257.get_remote_consoles)
-list_tags = copy.deepcopy(servers257.list_tags)
-update_all_tags = copy.deepcopy(servers257.update_all_tags)
-delete_all_tags = copy.deepcopy(servers257.delete_all_tags)
-check_tag_existence = copy.deepcopy(servers257.check_tag_existence)
-update_tag = copy.deepcopy(servers257.update_tag)
-delete_tag = copy.deepcopy(servers257.delete_tag)
-attach_volume = copy.deepcopy(servers257.attach_volume)
-show_volume_attachment = copy.deepcopy(servers257.show_volume_attachment)
-list_volume_attachments = copy.deepcopy(servers257.list_volume_attachments)
+list_servers = copy.deepcopy(servers262.list_servers)
+show_server_diagnostics = copy.deepcopy(servers262.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers262.get_remote_consoles)
+list_tags = copy.deepcopy(servers262.list_tags)
+update_all_tags = copy.deepcopy(servers262.update_all_tags)
+delete_all_tags = copy.deepcopy(servers262.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers262.check_tag_existence)
+update_tag = copy.deepcopy(servers262.update_tag)
+delete_tag = copy.deepcopy(servers262.delete_tag)
+attach_volume = copy.deepcopy(servers262.attach_volume)
+show_volume_attachment = copy.deepcopy(servers262.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers262.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers262.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_70/interfaces.py b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
new file mode 100644
index 0000000..3160b92
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import interfaces
+
+# ****** Schemas changed in microversion 2.70 *****************
+#
+# 1. add optional field 'tag' in the Response body of the following APIs:
+#    - GET /servers/{server_id}/os-interface
+#    - POST /servers/{server_id}/os-interface
+#    - GET /servers/{server_id}/os-interface/{port_id}
+
+get_create_interfaces = copy.deepcopy(interfaces.get_create_interfaces)
+get_create_interfaces['response_body']['properties']['interfaceAttachment'][
+    'properties'].update({'tag': {'type': ['string', 'null']}})
+
+list_interfaces = copy.deepcopy(interfaces.list_interfaces)
+list_interfaces['response_body']['properties']['interfaceAttachments'][
+    'items']['properties'].update({'tag': {'type': ['string', 'null']}})
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.1 ***
+delete_interface = copy.deepcopy(interfaces.delete_interface)
diff --git a/tempest/lib/api_schema/response/compute/v2_70/servers.py b/tempest/lib/api_schema/response/compute/v2_70/servers.py
index 5ca4cc8..6103923 100644
--- a/tempest/lib/api_schema/response/compute/v2_70/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_70/servers.py
@@ -78,3 +78,4 @@
 check_tag_existence = copy.deepcopy(servers263.check_tag_existence)
 update_tag = copy.deepcopy(servers263.update_tag)
 delete_tag = copy.deepcopy(servers263.delete_tag)
+show_instance_action = copy.deepcopy(servers263.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
index 5cf0f8a..3e55c1c 100644
--- a/tempest/lib/api_schema/response/compute/v2_71/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -79,3 +79,7 @@
 check_tag_existence = copy.deepcopy(servers270.check_tag_existence)
 update_tag = copy.deepcopy(servers270.update_tag)
 delete_tag = copy.deepcopy(servers270.delete_tag)
+attach_volume = copy.deepcopy(servers270.attach_volume)
+show_volume_attachment = copy.deepcopy(servers270.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers270.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers270.show_instance_action)
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/lib/api_schema/response/compute/v2_73/__init__.py
similarity index 100%
copy from tempest/tests/lib/services/volume/v1/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_73/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
new file mode 100644
index 0000000..e7a1d87
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -0,0 +1,82 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_71 import servers as servers271
+
+
+###########################################################################
+#
+# 2.73:
+#
+# The locked_reason parameter is now returned in the response body of the
+# following calls:
+#
+# - POST /servers/{server_id}/action (where the action is rebuild)
+# - PUT /servers/{server_id} (update)
+# - GET /servers/{server_id} (show)
+# - GET /servers/detail (list)
+#
+###########################################################################
+
+# The "locked_reason" parameter will either be a string or None.
+locked_reason = {'type': ['string', 'null']}
+
+rebuild_server = copy.deepcopy(servers271.rebuild_server)
+rebuild_server['response_body']['properties']['server'][
+    'properties'].update({'locked_reason': locked_reason})
+rebuild_server['response_body']['properties']['server'][
+    'required'].append('locked_reason')
+
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers271.rebuild_server_with_admin_pass)
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'properties'].update({'locked_reason': locked_reason})
+rebuild_server_with_admin_pass['response_body']['properties']['server'][
+    'required'].append('locked_reason')
+
+update_server = copy.deepcopy(servers271.update_server)
+update_server['response_body']['properties']['server'][
+    'properties'].update({'locked_reason': locked_reason})
+update_server['response_body']['properties']['server'][
+    'required'].append('locked_reason')
+
+get_server = copy.deepcopy(servers271.get_server)
+get_server['response_body']['properties']['server'][
+    'properties'].update({'locked_reason': locked_reason})
+get_server['response_body']['properties']['server'][
+    'required'].append('locked_reason')
+
+list_servers_detail = copy.deepcopy(servers271.list_servers_detail)
+list_servers_detail['response_body']['properties']['servers']['items'][
+    'properties'].update({'locked_reason': locked_reason})
+list_servers_detail['response_body']['properties']['servers']['items'][
+    'required'].append('locked_reason')
+
+# NOTE(lajoskatona): Below are the schemas unchanged in this microversion. We
+# need to keep them in this file so the service client can generically select
+# the right schema based on its self.schema_versions_info mapping.
+# ****** Schemas unchanged since microversion 2.71 ***
+list_servers = copy.deepcopy(servers271.list_servers)
+show_server_diagnostics = copy.deepcopy(servers271.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers271.get_remote_consoles)
+list_tags = copy.deepcopy(servers271.list_tags)
+update_all_tags = copy.deepcopy(servers271.update_all_tags)
+delete_all_tags = copy.deepcopy(servers271.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers271.check_tag_existence)
+update_tag = copy.deepcopy(servers271.update_tag)
+delete_tag = copy.deepcopy(servers271.delete_tag)
+attach_volume = copy.deepcopy(servers271.attach_volume)
+show_volume_attachment = copy.deepcopy(servers271.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers271.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers271.show_instance_action)
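+
+# Illustration only (hypothetical test sketch, not part of this change): with
+# microversion >= 2.73 a test could lock a server with a reason and expect it
+# back in the schema-validated show response, roughly:
+#
+#     self.servers_client.lock_server(server_id, locked_reason='maintenance')
+#     server = self.servers_client.show_server(server_id)['server']
+#     assert server['locked_reason'] == 'maintenance'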
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/lib/api_schema/response/compute/v2_79/__init__.py
similarity index 100%
copy from tempest/tests/lib/services/volume/v1/__init__.py
copy to tempest/lib/api_schema/response/compute/v2_79/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_79/servers.py b/tempest/lib/api_schema/response/compute/v2_79/servers.py
new file mode 100644
index 0000000..b5507f9
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_79/servers.py
@@ -0,0 +1,68 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_73 import servers as servers273
+
+
+###########################################################################
+#
+# 2.79:
+#
+# The delete_on_termination parameter is now returned in the response body
+# of the following calls:
+#
+# - GET /servers/{server_id}/os-volume_attachments
+# - GET /servers/{server_id}/os-volume_attachments/{volume_id}
+# - POST /servers/{server_id}/os-volume_attachments
+###########################################################################
+
+attach_volume = copy.deepcopy(servers273.attach_volume)
+attach_volume['response_body']['properties']['volumeAttachment'][
+    'properties'].update({'delete_on_termination': {'type': 'boolean'}})
+attach_volume['response_body']['properties']['volumeAttachment'][
+    'required'].append('delete_on_termination')
+
+show_volume_attachment = copy.deepcopy(servers273.show_volume_attachment)
+show_volume_attachment['response_body']['properties']['volumeAttachment'][
+    'properties'].update({'delete_on_termination': {'type': 'boolean'}})
+show_volume_attachment['response_body']['properties'][
+    'volumeAttachment']['required'].append('delete_on_termination')
+
+list_volume_attachments = copy.deepcopy(servers273.list_volume_attachments)
+list_volume_attachments['response_body']['properties']['volumeAttachments'][
+    'items']['properties'].update(
+        {'delete_on_termination': {'type': 'boolean'}})
+list_volume_attachments['response_body']['properties'][
+    'volumeAttachments']['items']['required'].append('delete_on_termination')
+
+# NOTE(zhufl): Below are the schemas unchanged in this microversion. We need
+# to keep them in this file so the service client can generically select the
+# right schema based on its self.schema_versions_info mapping.
+# ****** Schemas unchanged since microversion 2.73 ***
+rebuild_server = copy.deepcopy(servers273.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers273.rebuild_server_with_admin_pass)
+update_server = copy.deepcopy(servers273.update_server)
+get_server = copy.deepcopy(servers273.get_server)
+list_servers_detail = copy.deepcopy(servers273.list_servers_detail)
+list_servers = copy.deepcopy(servers273.list_servers)
+show_server_diagnostics = copy.deepcopy(servers273.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers273.get_remote_consoles)
+list_tags = copy.deepcopy(servers273.list_tags)
+update_all_tags = copy.deepcopy(servers273.update_all_tags)
+delete_all_tags = copy.deepcopy(servers273.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers273.check_tag_existence)
+update_tag = copy.deepcopy(servers273.update_tag)
+delete_tag = copy.deepcopy(servers273.delete_tag)
+show_instance_action = copy.deepcopy(servers273.show_instance_action)
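+
+# Illustration only (hypothetical usage, not part of this change): with
+# microversion >= 2.79 the attach request may set delete_on_termination, and
+# the schema above requires it in the validated response, roughly:
+#
+#     attachment = self.servers_client.attach_volume(
+#         server_id, volumeId=volume_id,
+#         delete_on_termination=True)['volumeAttachment']
+#     assert attachment['delete_on_termination'] is True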
diff --git a/tempest/lib/api_schema/response/compute/v2_8/servers.py b/tempest/lib/api_schema/response/compute/v2_8/servers.py
index 3dbab3f..119d8e2 100644
--- a/tempest/lib/api_schema/response/compute/v2_8/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_8/servers.py
@@ -38,3 +38,4 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_9/servers.py b/tempest/lib/api_schema/response/compute/v2_9/servers.py
index ee0313d..9258eec 100644
--- a/tempest/lib/api_schema/response/compute/v2_9/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_9/servers.py
@@ -57,3 +57,4 @@
 attach_volume = copy.deepcopy(servers.attach_volume)
 show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
 list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/volume/backups.py b/tempest/lib/api_schema/response/volume/backups.py
new file mode 100644
index 0000000..cba7981
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/backups.py
@@ -0,0 +1,229 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+common_show_backup = {
+    'type': 'object',
+    'properties': {
+        'status': {'type': 'string'},
+        'object_count': {'type': 'integer'},
+        'container': {'type': ['string', 'null']},
+        'description': {'type': ['string', 'null']},
+        'links': parameter_types.links,
+        'availability_zone': {'type': ['string', 'null']},
+        'created_at': parameter_types.date_time,
+        'updated_at': parameter_types.date_time_or_null,
+        'name': {'type': ['string', 'null']},
+        'has_dependent_backups': {'type': 'boolean'},
+        'volume_id': {'type': 'string', 'format': 'uuid'},
+        'fail_reason': {'type': ['string', 'null']},
+        'size': {'type': 'integer'},
+        'id': {'type': 'string', 'format': 'uuid'},
+        'is_incremental': {'type': 'boolean'},
+        'data_timestamp': parameter_types.date_time_or_null,
+        'snapshot_id': {'type': ['string', 'null']},
+        # TODO(zhufl): os-backup-project-attr:project_id is added
+        # in 3.18, we should move it to the 3.18 schema file when
+        # microversion is supported in volume interfaces.
+        'os-backup-project-attr:project_id': {
+            'type': 'string', 'format': 'uuid'},
+        # TODO(zhufl): metadata is added in 3.43, we should move it
+        # to the 3.43 schema file when microversion is supported
+        # in volume interfaces.
+        'metadata': {
+            'type': 'object',
+            'patternProperties': {'^.+$': {'type': 'string'}}
+        },
+        # TODO(zhufl): user_id is added in 3.56, we should move it
+        # to the 3.56 schema file when microversion is supported
+        # in volume interfaces.
+        'user_id': {'type': 'string'},
+    },
+    'additionalProperties': False,
+    'required': ['status', 'object_count', 'fail_reason', 'links',
+                 'created_at', 'updated_at', 'name', 'volume_id', 'size', 'id',
+                 'data_timestamp']
+}
+
+create_backup = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'backup': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string', 'format': 'uuid'},
+                    'links': parameter_types.links,
+                    'name': {'type': ['string', 'null']},
+                    # TODO(zhufl): metadata is added in 3.43, we should move it
+                    # to the 3.43 schema file when microversion is supported
+                    # in volume interfaces.
+                    'metadata': {
+                        'type': 'object',
+                        'patternProperties': {'^.+$': {'type': 'string'}}
+                    },
+                },
+                'additionalProperties': False,
+                'required': ['id', 'links', 'name']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['backup']
+    }
+}
+
+update_backup = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'backup': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string', 'format': 'uuid'},
+                    'links': parameter_types.links,
+                    'name': {'type': ['string', 'null']},
+                    'metadata': {
+                        'type': 'object',
+                        'patternProperties': {'^.+$': {'type': 'string'}}
+                    }
+                },
+                'additionalProperties': False,
+                'required': ['id', 'links', 'name']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['backup']
+    }
+}
+
+restore_backup = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'restore': {
+                'type': 'object',
+                'properties': {
+                    'backup_id': {'type': 'string', 'format': 'uuid'},
+                    'volume_id': {'type': 'string', 'format': 'uuid'},
+                    'volume_name': {'type': 'string'},
+                },
+                'additionalProperties': False,
+                'required': ['backup_id', 'volume_id', 'volume_name']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['restore']
+    }
+}
+
+delete_backup = {'status_code': [202]}
+
+show_backup = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'backup': common_show_backup
+        },
+        'additionalProperties': False,
+        'required': ['backup']
+    }
+}
+
+list_backups_no_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'backups': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'links': parameter_types.links,
+                        'id': {'type': 'string', 'format': 'uuid'},
+                        'name': {'type': ['string', 'null']},
+                        # TODO(zhufl): count is added in 3.45, we should move
+                        # it to the 3.45 schema file when microversion is
+                        # supported in volume interfaces
+                        'count': {'type': 'integer'}
+                    },
+                    'additionalProperties': False,
+                    'required': ['links', 'id', 'name']
+                }
+            }
+        },
+        'additionalProperties': False,
+        'required': ['backups'],
+    }
+}
+
+list_backups_detail = copy.deepcopy(common_show_backup)
+# TODO(zhufl): count is added in 3.45, we should move it to the 3.45 schema
+# file when microversion is supported in volume interfaces
+list_backups_detail['properties'].update({'count': {'type': 'integer'}})
+list_backups_with_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'backups': {
+                'type': 'array',
+                'items': list_backups_detail
+            }
+        },
+        'additionalProperties': False,
+        'required': ['backups'],
+    }
+}
+
+export_backup = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'backup-record': {
+                'type': 'object',
+                'properties': {
+                    'backup_service': {'type': 'string'},
+                    'backup_url': {'type': 'string'}
+                },
+                'additionalProperties': False,
+                'required': ['backup_service', 'backup_url']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['backup-record']
+    }
+}
+
+import_backup = {
+    'status_code': [201],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'backup': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string', 'format': 'uuid'},
+                    'links': parameter_types.links,
+                    'name': {'type': ['string', 'null']},
+                },
+                'additionalProperties': False,
+                'required': ['id', 'links', 'name']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['backup']
+    }
+}
+
+reset_backup_status = {'status_code': [202]}
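+
+# Illustration only (assumed typical usage, not part of this change): a volume
+# backups client validates its responses against these schemas, roughly:
+#
+#     from tempest.lib.api_schema.response.volume import backups as schema
+#
+#     resp, body = self.get("backups/%s" % backup_id)
+#     body = json.loads(body)
+#     self.validate_response(schema.show_backup, resp, body)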
diff --git a/tempest/lib/api_schema/response/volume/encryption_types.py b/tempest/lib/api_schema/response/volume/encryption_types.py
new file mode 100755
index 0000000..7e7ca4a
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/encryption_types.py
@@ -0,0 +1,95 @@
+# Copyright 2019 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+show_encryption_type = {
+    'status_code': [200],
+    'response_body': {
+        'type': ['object', 'null'],
+        'properties': {
+            'volume_type_id': {'type': 'string', 'format': 'uuid'},
+            'encryption_id': {'type': 'string', 'format': 'uuid'},
+            'key_size': {'type': ['integer', 'null']},
+            'provider': {'type': 'string'},
+            'control_location': {'enum': ['front-end', 'back-end']},
+            'cipher': {'type': ['string', 'null']},
+            'deleted': {'type': 'boolean'},
+            'created_at': parameter_types.date_time,
+            'updated_at': parameter_types.date_time_or_null,
+            'deleted_at': parameter_types.date_time_or_null
+        },
+        # The result of show_encryption_type may be empty or None,
+        # so there are no required fields.
+        'additionalProperties': False,
+    }
+}
+
+show_encryption_specs_item = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'patternProperties': {
+            '^.+$': {'type': 'string'}
+        }
+    }
+}
+
+create_encryption_type = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'encryption': {
+                'type': 'object',
+                'properties': {
+                    'volume_type_id': {'type': 'string', 'format': 'uuid'},
+                    'encryption_id': {'type': 'string', 'format': 'uuid'},
+                    'key_size': {'type': ['integer', 'null']},
+                    'provider': {'type': 'string'},
+                    'control_location': {'enum': ['front-end', 'back-end']},
+                    'cipher': {'type': ['string', 'null']},
+                },
+                'additionalProperties': False,
+                'required': ['volume_type_id', 'encryption_id']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['encryption']
+    }
+}
+
+delete_encryption_type = {'status_code': [202]}
+
+update_encryption_type = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'encryption': {
+                'type': 'object',
+                'properties': {
+                    'key_size': {'type': ['integer', 'null']},
+                    'provider': {'type': 'string'},
+                    'control_location': {'enum': ['front-end', 'back-end']},
+                    'cipher': {'type': ['string', 'null']},
+                },
+                # all fields are optional
+                'additionalProperties': False,
+            }
+        },
+        'additionalProperties': False,
+        'required': ['encryption']
+    }
+}
diff --git a/tempest/lib/api_schema/response/volume/group_snapshots.py b/tempest/lib/api_schema/response/volume/group_snapshots.py
new file mode 100644
index 0000000..c75c3ba
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/group_snapshots.py
@@ -0,0 +1,106 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+create_group_snapshot = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_snapshot': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string', 'format': 'uuid'},
+                    'name': {'type': 'string'},
+                    'group_type_id': {'type': 'string', 'format': 'uuid'},
+                },
+                'additionalProperties': False,
+                'required': ['id', 'name', 'group_type_id']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['group_snapshot']
+    }
+}
+
+delete_group_snapshot = {'status_code': [202]}
+
+common_show_group_snapshot = {
+    'type': 'object',
+    'properties': {
+        'created_at': parameter_types.date_time,
+        'group_id': {'type': 'string', 'format': 'uuid'},
+        'id': {'type': 'string', 'format': 'uuid'},
+        'name': {'type': 'string'},
+        'status': {'type': 'string'},
+        'description': {'type': ['string', 'null']},
+        'group_type_id': {'type': 'string', 'format': 'uuid'},
+    },
+    'additionalProperties': False,
+    'required': ['created_at', 'group_id', 'id', 'name',
+                 'status', 'description', 'group_type_id']
+}
+
+show_group_snapshot = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_snapshot': common_show_group_snapshot
+        },
+        'additionalProperties': False,
+        'required': ['group_snapshot']
+    }
+}
+
+list_group_snapshots_no_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_snapshots': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'id': {'type': 'string', 'format': 'uuid'},
+                        'name': {'type': 'string'}
+                    },
+                    'additionalProperties': False,
+                    'required': ['id', 'name'],
+                }
+            }
+        },
+        'additionalProperties': False,
+        'required': ['group_snapshots'],
+    }
+}
+
+list_group_snapshots_with_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_snapshots': {
+                'type': 'array',
+                'items': common_show_group_snapshot
+            }
+        },
+        'additionalProperties': False,
+        'required': ['group_snapshots'],
+    }
+}
+
+reset_group_snapshot_status = {'status_code': [202]}
diff --git a/tempest/lib/api_schema/response/volume/group_types.py b/tempest/lib/api_schema/response/volume/group_types.py
new file mode 100644
index 0000000..4fc9ae8
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/group_types.py
@@ -0,0 +1,134 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+group_specs = {
+    'type': 'object',
+    'patternProperties': {
+        '^.+$': {'type': 'string'}
+    }
+}
+
+common_show_group_type = {
+    'type': 'object',
+    'properties': {
+        'id': {'type': 'string'},
+        'is_public': {'type': 'boolean'},
+        'group_specs': group_specs,
+        'description': {'type': ['string', 'null']},
+        'name': {'type': 'string'},
+    },
+    'additionalProperties': False,
+    'required': ['id', 'is_public', 'description', 'name']
+}
+
+create_group_type = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_type': common_show_group_type
+        },
+        'additionalProperties': False,
+        'required': ['group_type']
+    }
+}
+
+delete_group_type = {'status_code': [202]}
+
+list_group_types = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_types': {
+                'type': 'array',
+                'items': common_show_group_type
+            }
+        },
+        'additionalProperties': False,
+        'required': ['group_types'],
+    }
+}
+
+show_group_type = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_type': common_show_group_type
+        },
+        'additionalProperties': False,
+        'required': ['group_type']
+    }
+}
+
+show_default_group_type = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_type': common_show_group_type
+        },
+        'additionalProperties': False,
+        'required': ['group_type']
+    }
+}
+
+update_group_type = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_type': common_show_group_type
+        },
+        'additionalProperties': False,
+        'required': ['group_type']
+    }
+}
+
+create_or_update_group_type_specs = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_specs': group_specs,
+        },
+        'additionalProperties': False,
+        'required': ['group_specs']
+    }
+}
+
+list_group_type_specs = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group_specs': group_specs,
+        },
+        'additionalProperties': False,
+        'required': ['group_specs']
+    }
+}
+
+show_group_type_specs_item = {
+    'status_code': [200],
+    'response_body': group_specs
+}
+
+update_group_type_specs_item = {
+    'status_code': [200],
+    'response_body': group_specs
+}
+
+delete_group_type_specs_item = {'status_code': [202]}
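+
+# Illustration only (not part of this change): tempest validates responses
+# with jsonschema, so the group_specs pattern above can be exercised directly;
+# string values pass while anything else is rejected:
+#
+#     import jsonschema
+#     jsonschema.validate({'key': 'value'}, group_specs)    # passes
+#     jsonschema.validate({'key': 1}, group_specs)          # ValidationError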
diff --git a/tempest/lib/api_schema/response/volume/groups.py b/tempest/lib/api_schema/response/volume/groups.py
new file mode 100644
index 0000000..f6e4bc2
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/groups.py
@@ -0,0 +1,171 @@
+# Copyright 2015 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+create_group = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string', 'format': 'uuid'},
+                    'name': {'type': 'string'},
+                },
+                'additionalProperties': False,
+                'required': ['id', 'name']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['group']
+    }
+}
+
+delete_group = {'status_code': [202]}
+
+show_group = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group': {
+                'type': 'object',
+                'properties': {
+                    'status': {'type': 'string'},
+                    'description': {'type': ['string', 'null']},
+                    'availability_zone': {'type': 'string'},
+                    'created_at': parameter_types.date_time,
+                    'group_type': {'type': 'string', 'format': 'uuid'},
+                    'group_snapshot_id': {'type': ['string', 'null']},
+                    'source_group_id': {'type': ['string', 'null']},
+                    'volume_types': {
+                        'type': 'array',
+                        'items': {'type': 'string', 'format': 'uuid'}
+                    },
+                    'id': {'type': 'string', 'format': 'uuid'},
+                    'name': {'type': 'string'},
+                    # TODO(zhufl): volumes is added in 3.25, we should move it
+                    # to the 3.25 schema file when microversion is supported
+                    # in volume interfaces
+                    'volumes': {
+                        'type': 'array',
+                        'items': {'type': 'string', 'format': 'uuid'}
+                    },
+                    # TODO(zhufl): replication_status is added in 3.38, we
+                    # should move it to the 3.38 schema file when microversion
+                    # is supported in volume interfaces
+                    'replication_status': {'type': ['string', 'null']}
+                },
+                'additionalProperties': False,
+                'required': ['status', 'description', 'created_at',
+                             'group_type', 'volume_types', 'id', 'name']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['group']
+    }
+}
+
+list_groups_no_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'groups': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'id': {'type': 'string', 'format': 'uuid'},
+                        'name': {'type': 'string'}
+                    },
+                    'additionalProperties': False,
+                    'required': ['id', 'name'],
+                }
+            }
+        },
+        'additionalProperties': False,
+        'required': ['groups'],
+    }
+}
+
+list_groups_with_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'groups': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'status': {'type': 'string'},
+                        'description': {'type': ['string', 'null']},
+                        'availability_zone': {'type': 'string'},
+                        'created_at': parameter_types.date_time,
+                        'group_type': {'type': 'string', 'format': 'uuid'},
+                        'group_snapshot_id': {'type': ['string', 'null']},
+                        'source_group_id': {'type': ['string', 'null']},
+                        'volume_types': {
+                            'type': 'array',
+                            'items': {'type': 'string', 'format': 'uuid'}
+                        },
+                        'id': {'type': 'string', 'format': 'uuid'},
+                        'name': {'type': 'string'},
+                        # TODO(zhufl): volumes is added in 3.25, we should
+                        # move it to the 3.25 schema file when microversion
+                        # is supported in volume interfaces
+                        'volumes': {
+                            'type': 'array',
+                            'items': {'type': 'string', 'format': 'uuid'}
+                        },
+                        # TODO(zhufl): replication_status is added in 3.38, we
+                        # should move it to the 3.38 schema file when
+                        # microversion is supported in volume interfaces
+                        'replication_status': {'type': ['string', 'null']}
+                    },
+                    'additionalProperties': False,
+                    'required': ['status', 'description', 'created_at',
+                                 'group_type', 'volume_types', 'id', 'name']
+                }
+            }
+        },
+        'additionalProperties': False,
+        'required': ['groups'],
+    }
+}
+
+create_group_from_source = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'group': {
+                'type': 'object',
+                'properties': {
+                    'id': {'type': 'string', 'format': 'uuid'},
+                    'name': {'type': 'string'},
+                },
+                'additionalProperties': False,
+                'required': ['id', 'name']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['group']
+    }
+}
+update_group = {'status_code': [202]}
+reset_group_status = {'status_code': [202]}
diff --git a/tempest/lib/api_schema/response/volume/limits.py b/tempest/lib/api_schema/response/volume/limits.py
new file mode 100644
index 0000000..99af180
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/limits.py
@@ -0,0 +1,55 @@
+# Copyright 2018 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+show_limits = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'limits': {
+                'type': 'object',
+                'properties': {
+                    'rate': {'type': 'array'},
+                    'absolute': {
+                        'type': 'object',
+                        'properties': {
+                            'totalSnapshotsUsed': {'type': 'integer'},
+                            'maxTotalBackups': {'type': 'integer'},
+                            'maxTotalVolumeGigabytes': {'type': 'integer'},
+                            'maxTotalSnapshots': {'type': 'integer'},
+                            'maxTotalBackupGigabytes': {'type': 'integer'},
+                            'totalBackupGigabytesUsed': {'type': 'integer'},
+                            'maxTotalVolumes': {'type': 'integer'},
+                            'totalVolumesUsed': {'type': 'integer'},
+                            'totalBackupsUsed': {'type': 'integer'},
+                            'totalGigabytesUsed': {'type': 'integer'},
+                        },
+                        'additionalProperties': False,
+                        'required': ['totalSnapshotsUsed', 'maxTotalBackups',
+                                     'maxTotalVolumeGigabytes',
+                                     'maxTotalSnapshots',
+                                     'maxTotalBackupGigabytes',
+                                     'totalBackupGigabytesUsed',
+                                     'maxTotalVolumes', 'totalVolumesUsed',
+                                     'totalBackupsUsed', 'totalGigabytesUsed']
+                    }
+                },
+                'additionalProperties': False,
+                'required': ['rate', 'absolute'],
+            }
+        },
+        'additionalProperties': False,
+        'required': ['limits']
+    }
+}
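+
+# Illustration only (hypothetical client/attribute names, not part of this
+# change): after validation a test reads the absolute limits directly, e.g.:
+#
+#     limits = self.volume_limits_client.show_limits()['limits']
+#     max_volumes = limits['absolute']['maxTotalVolumes']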
diff --git a/tempest/lib/api_schema/response/volume/manage_volume.py b/tempest/lib/api_schema/response/volume/manage_volume.py
new file mode 100644
index 0000000..d3acfd9
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/manage_volume.py
@@ -0,0 +1,27 @@
+# Copyright 2018 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.api_schema.response.volume import volumes
+
+
+manage_volume = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volume': volumes.common_show_volume},
+        'additionalProperties': False,
+        'required': ['volume']
+    }
+}
diff --git a/tempest/lib/api_schema/response/volume/scheduler_stats.py b/tempest/lib/api_schema/response/volume/scheduler_stats.py
new file mode 100644
index 0000000..b5d7d2c
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/scheduler_stats.py
@@ -0,0 +1,79 @@
+# Copyright 2018 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+get_pools_no_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'pools': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'name': {'type': 'string'},
+                    },
+                    'additionalProperties': False,
+                    'required': ['name']
+                }
+            }
+        },
+        'additionalProperties': False,
+        'required': ['pools'],
+    }
+}
+
+get_pools_with_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'pools': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'name': {'type': 'string'},
+                        'capabilities': {
+                            'type': ['object', 'null'],
+                            'properties': {
+                                'updated': parameter_types.date_time_or_null,
+                                'QoS_support': {'type': 'boolean'},
+                                'total_capacity_gb': {
+                                    'type': ['number', 'string']
+                                },
+                                'volume_backend_name': {'type': 'string'},
+                                'free_capacity_gb': {
+                                    'type': ['number', 'string']
+                                },
+                                'driver_version': {'type': 'string'},
+                                'reserved_percentage': {'type': 'integer'},
+                                'storage_protocol': {'type': 'string'},
+                                'vendor_name': {'type': 'string'},
+                                'timestamp': parameter_types.date_time_or_null
+                            },
+                            # Some legacy volumes or backends may not support
+                            # pools, so there are no required fields here.
+                        },
+                    },
+                    'additionalProperties': False,
+                    'required': ['name', 'capabilities']
+                }
+            }
+        },
+        'additionalProperties': False,
+        'required': ['pools'],
+    }
+}
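+
+# Illustration only (hypothetical usage, not part of this change): note that
+# 'capabilities' may be null and the capacity fields may be numbers or strings
+# (backends can report e.g. 'infinite' or 'unknown'), so a consumer should
+# treat them defensively:
+#
+#     for pool in pools_response['pools']:
+#         caps = pool['capabilities'] or {}
+#         total = caps.get('total_capacity_gb')  # number, 'infinite', ...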
diff --git a/tempest/lib/api_schema/response/volume/services.py b/tempest/lib/api_schema/response/volume/services.py
index 70de878..216631c 100644
--- a/tempest/lib/api_schema/response/volume/services.py
+++ b/tempest/lib/api_schema/response/volume/services.py
@@ -33,10 +33,6 @@
                         'frozen': {'type': 'boolean'},
                         'updated_at': parameter_types.date_time,
                         'zone': {'type': 'string'},
-                        # TODO(zhufl): cluster is added in 3.7, we should move
-                        # it to the 3.7 schema file when microversion is
-                        # supported in volume interfaces
-                        'cluster': {'type': 'string'},
                         'replication_status': {'type': 'string'},
                         'active_backend_id': {'type': ['string', 'null']},
                         'backend_state': {'type': 'string'},
diff --git a/tempest/lib/api_schema/response/volume/snapshots.py b/tempest/lib/api_schema/response/volume/snapshots.py
new file mode 100644
index 0000000..9d52801
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/snapshots.py
@@ -0,0 +1,198 @@
+# Copyright 2018 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+metadata = {
+    'type': 'object',
+    'patternProperties': {
+        '^.+$': {'type': 'string'}
+    }
+}
+
+common_snapshot_schema = {
+    'type': 'object',
+    'properties': {
+        'status': {'type': 'string'},
+        'description': {'type': ['string', 'null']},
+        'created_at': parameter_types.date_time,
+        'name': {'type': ['string', 'null']},
+        'volume_id': {'type': 'string', 'format': 'uuid'},
+        'metadata': metadata,
+        'id': {'type': 'string', 'format': 'uuid'},
+        'size': {'type': 'integer'},
+        'updated_at': parameter_types.date_time_or_null,
+        # TODO(zhufl): user_id is added in 3.41, we should move it
+        # to the 3.41 schema file when microversion is supported
+        # in volume interfaces
+        # 'user_id': {'type': 'string', 'format': 'uuid'}
+    },
+    'additionalProperties': False,
+    'required': ['status', 'description', 'created_at', 'metadata',
+                 'name', 'volume_id', 'id', 'size', 'updated_at']
+}
+
+common_snapshot_detail_schema = copy.deepcopy(common_snapshot_schema)
+common_snapshot_detail_schema['properties'].update(
+    {'os-extended-snapshot-attributes:progress': {'type': 'string'},
+     'os-extended-snapshot-attributes:project_id': {
+         'type': 'string', 'format': 'uuid'}})
+common_snapshot_detail_schema['required'].extend(
+    ['os-extended-snapshot-attributes:progress',
+     'os-extended-snapshot-attributes:project_id'])
+
+list_snapshots_no_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'snapshots': {
+                'type': 'array',
+                'items': common_snapshot_schema
+            },
+            'snapshots_links': parameter_types.links,
+            # TODO(zhufl): count is added in 3.45, we should move
+            # it to the 3.45 schema file when microversion is
+            # supported in volume interfaces
+            # 'count': {'type': 'integer'}
+        },
+        'additionalProperties': False,
+        'required': ['snapshots'],
+    }
+}
+
+list_snapshots_with_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'snapshots': {
+                'type': 'array',
+                'items': common_snapshot_detail_schema
+            },
+            'snapshots_links': parameter_types.links,
+            # TODO(zhufl): count is added in 3.45, we should move
+            # it to the 3.45 schema file when microversion is
+            # supported in volume interfaces
+            # 'count': {'type': 'integer'},
+        },
+        'additionalProperties': False,
+        'required': ['snapshots'],
+    }
+}
+
+show_snapshot = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'snapshot': common_snapshot_detail_schema
+        },
+        'additionalProperties': False,
+        'required': ['snapshot'],
+    }
+}
+
+create_snapshot = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'snapshot': common_snapshot_schema
+        },
+        'additionalProperties': False,
+        'required': ['snapshot'],
+    }
+}
+
+update_snapshot = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'snapshot': common_snapshot_schema
+        },
+        'additionalProperties': False,
+        'required': ['snapshot'],
+    }
+}
+
+delete_snapshot = {'status_code': [202]}
+reset_snapshot_status = {'status_code': [202]}
+update_snapshot_status = {'status_code': [202]}
+
+create_snapshot_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': metadata
+        },
+        'additionalProperties': False,
+        'required': ['metadata'],
+    }
+}
+
+show_snapshot_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': metadata
+        },
+        'additionalProperties': False,
+        'required': ['metadata'],
+    }
+}
+
+update_snapshot_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': metadata
+        },
+        'additionalProperties': False,
+        'required': ['metadata'],
+    }
+}
+
+show_snapshot_metadata_item = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'meta': metadata
+        },
+        'additionalProperties': False,
+        'required': ['meta'],
+    }
+}
+
+update_snapshot_metadata_item = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'meta': metadata
+        },
+        'additionalProperties': False,
+        'required': ['meta'],
+    }
+}
+
+delete_snapshot_metadata_item = {'status_code': [200]}
+force_delete_snapshot = {'status_code': [202]}
+unmanage_snapshot = {'status_code': [202]}
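+
+# Illustration only (not part of this change): the per-item metadata calls
+# wrap their result in 'meta' rather than 'metadata'; assuming this module is
+# imported as 'schema', a client reads them roughly as:
+#
+#     self.validate_response(schema.show_snapshot_metadata_item, resp, body)
+#     value = body['meta'][key]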
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/lib/api_schema/response/volume/v3_7/__init__.py
similarity index 100%
copy from tempest/tests/lib/services/volume/v1/__init__.py
copy to tempest/lib/api_schema/response/volume/v3_7/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_7/services.py b/tempest/lib/api_schema/response/volume/v3_7/services.py
new file mode 100644
index 0000000..8d43188
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_7/services.py
@@ -0,0 +1,34 @@
+# Copyright 2020 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.volume import services
+
+# Volume microversion 3.7:
+# 1. New optional attribute 'cluster' in the 'services' dict.
+
+list_services = copy.deepcopy(services.list_services)
+list_services['response_body']['properties']['services']['items'][
+    'properties'].update({'cluster': {'type': ['string', 'null']}})
+
+# NOTE(zhufl): Below are the schemas unchanged in this microversion. We need
+# to keep them in this file so the service client can generically select the
+# right schema based on its self.schema_versions_info mapping.
+# ****** Schemas unchanged since microversion 3.0 ******
+enable_service = copy.deepcopy(services.enable_service)
+disable_service = copy.deepcopy(services.disable_service)
+disable_log_reason = copy.deepcopy(services.disable_log_reason)
+freeze_host = copy.deepcopy(services.freeze_host)
+thaw_host = copy.deepcopy(services.thaw_host)
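+
+# Illustration only (assumed pattern and alias, not part of this change): a
+# volume services client can reuse the compute-style mapping to pick this
+# module from microversion 3.7 on:
+#
+#     schema_versions_info = [
+#         {'min': None, 'max': '3.6', 'schema': services},
+#         {'min': '3.7', 'max': None, 'schema': servicesv37}]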
diff --git a/tempest/lib/api_schema/response/volume/volumes.py b/tempest/lib/api_schema/response/volume/volumes.py
new file mode 100644
index 0000000..ffcf488
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/volumes.py
@@ -0,0 +1,368 @@
+# Copyright 2018 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+
+attachments = {
+    'type': 'array',
+    'items': {
+        'type': 'object',
+        'properties': {
+            'server_id': {'type': 'string', 'format': 'uuid'},
+            'attachment_id': {'type': 'string', 'format': 'uuid'},
+            'attached_at': parameter_types.date_time_or_null,
+            'host_name': {'type': ['string', 'null']},
+            'volume_id': {'type': 'string', 'format': 'uuid'},
+            'device': {'type': ['string', 'null']},
+            'id': {'type': 'string', 'format': 'uuid'}
+        },
+        'additionalProperties': False,
+        'required': ['server_id', 'attachment_id', 'host_name',
+                     'volume_id', 'device', 'id']
+    }
+}
+
+common_show_volume = {
+    'type': 'object',
+    'properties': {
+        'migration_status': {'type': ['string', 'null']},
+        'attachments': attachments,
+        'links': parameter_types.links,
+        'availability_zone': {'type': ['string', 'null']},
+        'os-vol-host-attr:host': {
+            'type': ['string', 'null'], 'pattern': '.+@.+#.+'},
+        'encrypted': {'type': 'boolean'},
+        'updated_at': parameter_types.date_time_or_null,
+        'replication_status': {'type': ['string', 'null']},
+        'snapshot_id': parameter_types.uuid_or_null,
+        'id': {'type': 'string', 'format': 'uuid'},
+        'size': {'type': 'integer'},
+        'user_id': {'type': 'string', 'format': 'uuid'},
+        'os-vol-tenant-attr:tenant_id': {'type': 'string',
+                                         'format': 'uuid'},
+        'os-vol-mig-status-attr:migstat': {'type': ['string', 'null']},
+        'metadata': {'type': 'object'},
+        'status': {'type': 'string'},
+        'volume_image_metadata': {'type': ['object', 'null']},
+        'description': {'type': ['string', 'null']},
+        'multiattach': {'type': 'boolean'},
+        'source_volid': parameter_types.uuid_or_null,
+        'consistencygroup_id': parameter_types.uuid_or_null,
+        'os-vol-mig-status-attr:name_id': parameter_types.uuid_or_null,
+        'name': {'type': ['string', 'null']},
+        'bootable': {'type': 'string'},
+        'created_at': parameter_types.date_time,
+        'volume_type': {'type': ['string', 'null']},
+        # TODO(zhufl): group_id is added in 3.13, we should move it to the
+        # 3.13 schema file when microversion is supported in volume interfaces
+        'group_id': parameter_types.uuid_or_null,
+        # TODO(zhufl): provider_id is added in 3.21, we should move it to the
+        # 3.21 schema file when microversion is supported in volume interfaces
+        'provider_id': parameter_types.uuid_or_null,
+        # TODO(zhufl): service_uuid and shared_targets are added in 3.48,
+        # we should move them to the 3.48 schema file when microversion
+        # is supported in volume interfaces.
+        'service_uuid': parameter_types.uuid_or_null,
+        'shared_targets': {'type': 'boolean'}
+    },
+    'additionalProperties': False,
+    'required': ['attachments', 'links', 'encrypted',
+                 'updated_at', 'replication_status', 'id',
+                 'size', 'user_id', 'availability_zone',
+                 'metadata', 'status', 'description',
+                 'multiattach', 'consistencygroup_id',
+                 'name', 'bootable', 'created_at',
+                 'volume_type', 'snapshot_id', 'source_volid']
+}
+
+list_volumes_no_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volumes': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'links': parameter_types.links,
+                        'id': {'type': 'string', 'format': 'uuid'},
+                        'name': {'type': ['string', 'null']},
+                        # TODO(zhufl): count is added in 3.45, we should move
+                        # it to the 3.45 schema file when microversion is
+                        # supported in volume interfaces
+                        # 'count': {'type': 'integer'}
+                    },
+                    'additionalProperties': False,
+                    'required': ['links', 'id', 'name']
+                }
+            },
+            'volumes_links': parameter_types.links
+        },
+        'additionalProperties': False,
+        'required': ['volumes']
+    }
+}
+
+show_volume = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volume': common_show_volume
+        },
+        'additionalProperties': False,
+        'required': ['volume']
+    }
+}
+
+list_volumes_detail = copy.deepcopy(common_show_volume)
+# TODO(zhufl): count is added in 3.45, we should move it to the 3.45 schema
+# file when microversion is supported in volume interfaces
+# list_volumes_detail['properties'].update({'count': {'type': 'integer'}})
+list_volumes_with_detail = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volumes': {
+                'type': 'array',
+                'items': list_volumes_detail
+            },
+            'volumes_links': parameter_types.links
+        },
+        'additionalProperties': False,
+        'required': ['volumes']
+    }
+}
+
+create_volume = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volume': {
+                'type': 'object',
+                'properties': {
+                    'migration_status': {'type': ['string', 'null']},
+                    'attachments': attachments,
+                    'links': parameter_types.links,
+                    'availability_zone': {'type': ['string', 'null']},
+                    'encrypted': {'type': 'boolean'},
+                    'updated_at': parameter_types.date_time_or_null,
+                    'replication_status': {'type': ['string', 'null']},
+                    'snapshot_id': parameter_types.uuid_or_null,
+                    'id': {'type': 'string', 'format': 'uuid'},
+                    'size': {'type': 'integer'},
+                    'user_id': {'type': 'string', 'format': 'uuid'},
+                    'metadata': {'type': 'object'},
+                    'status': {'type': 'string'},
+                    'description': {'type': ['string', 'null']},
+                    'multiattach': {'type': 'boolean'},
+                    'source_volid': parameter_types.uuid_or_null,
+                    'consistencygroup_id': parameter_types.uuid_or_null,
+                    'name': {'type': ['string', 'null']},
+                    'bootable': {'type': 'string'},
+                    'created_at': parameter_types.date_time,
+                    'volume_type': {'type': ['string', 'null']},
+                    # TODO(zhufl): group_id is added in 3.13, we should move
+                    # it to the 3.13 schema file when microversion is
+                    # supported in volume interfaces.
+                    'group_id': parameter_types.uuid_or_null,
+                    # TODO(zhufl): provider_id is added in 3.21, we should
+                    # move it to the 3.21 schema file when microversion is
+                    # supported in volume interfaces
+                    'provider_id': parameter_types.uuid_or_null,
+                    # TODO(zhufl): service_uuid and shared_targets are added
+                    # in 3.48, we should move them to the 3.48 schema file
+                    # when microversion is supported in volume interfaces.
+                    'service_uuid': parameter_types.uuid_or_null,
+                    'shared_targets': {'type': 'boolean'}
+                },
+                'additionalProperties': False,
+                'required': ['attachments', 'links', 'encrypted',
+                             'updated_at', 'replication_status', 'id',
+                             'size', 'user_id', 'availability_zone',
+                             'metadata', 'status', 'description',
+                             'multiattach', 'consistencygroup_id',
+                             'name', 'bootable', 'created_at',
+                             'volume_type', 'snapshot_id', 'source_volid']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['volume']
+    }
+}
+
+update_volume = copy.deepcopy(create_volume)
+update_volume.update({'status_code': [200]})
+
+delete_volume = {'status_code': [202]}
+
+show_volume_summary = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volume-summary': {
+                'type': 'object',
+                'properties': {
+                    'total_size': {'type': 'integer'},
+                    'total_count': {'type': 'integer'},
+                    # TODO(zhufl): metadata is added in 3.36, we should move
+                    # it to the 3.36 schema file when microversion is
+                    # supported in volume interfaces
+                    'metadata': {'type': 'object'},
+                },
+                'additionalProperties': False,
+                'required': ['total_size', 'total_count']
+            }
+        },
+        'additionalProperties': False,
+        'required': ['volume-summary']
+    }
+}
+
+# TODO(zhufl): This is under discussion, so will be merged in a separate patch.
+# https://bugs.launchpad.net/cinder/+bug/1880566
+# upload_volume = {
+#     'status_code': [202],
+#     'response_body': {
+#         'type': 'object',
+#         'properties': {
+#             'os-volume_upload_image': {
+#                 'type': 'object',
+#                 'properties': {
+#                     'status': {'type': 'string'},
+#                     'image_name': {'type': 'string'},
+#                     'disk_format': {'type': 'string'},
+#                     'container_format': {'type': 'string'},
+#                     'is_public': {'type': 'boolean'},
+#                     'visibility': {'type': 'string'},
+#                     'protected': {'type': 'boolean'},
+#                     'updated_at': parameter_types.date_time_or_null,
+#                     'image_id': {'type': 'string', 'format': 'uuid'},
+#                     'display_description': {'type': ['string', 'null']},
+#                     'id': {'type': 'string', 'format': 'uuid'},
+#                     'size': {'type': 'integer'},
+#                     'volume_type': {
+#                         'type': ['object', 'null'],
+#                         'properties': {
+#                             'created_at': parameter_types.date_time,
+#                             'deleted': {'type': 'boolean'},
+#                             'deleted_at': parameter_types.date_time_or_null,
+#                             'description': {'type': ['string', 'null']},
+#                             'extra_specs': {
+#                                 'type': 'object',
+#                                 'patternProperties': {
+#                                     '^.+$': {'type': 'string'}
+#                                 }
+#                             },
+#                             'id': {'type': 'string', 'format': 'uuid'},
+#                             'is_public': {'type': 'boolean'},
+#                             'name': {'type': ['string', 'null']},
+#                             'qos_specs_id': parameter_types.uuid_or_null,
+#                             'updated_at': parameter_types.date_time_or_null
+#                         },
+#                     }
+#                 },
+#                 'additionalProperties': False,
+#                 'required': ['status', 'image_name', 'updated_at',
+#                              'image_id',
+#                              'display_description', 'id', 'size',
+#                              'volume_type', 'disk_format',
+#                              'container_format']
+#             }
+#         },
+#         'additionalProperties': False,
+#         'required': ['os-volume_upload_image']
+#     }
+# }
+
+attach_volume = {'status_code': [202]}
+set_bootable_volume = {'status_code': [200]}
+detach_volume = {'status_code': [202]}
+reserve_volume = {'status_code': [202]}
+unreserve_volume = {'status_code': [202]}
+extend_volume = {'status_code': [202]}
+reset_volume_status = {'status_code': [202]}
+update_volume_readonly = {'status_code': [202]}
+force_delete_volume = {'status_code': [202]}
+retype_volume = {'status_code': [202]}
+force_detach_volume = {'status_code': [202]}
+
+create_volume_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': {'type': 'object'},
+        },
+        'additionalProperties': False,
+        'required': ['metadata']
+    }
+}
+
+show_volume_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': {'type': 'object'},
+        },
+        'additionalProperties': False,
+        'required': ['metadata']
+    }
+}
+update_volume_metadata = copy.deepcopy(show_volume_metadata)
+
+show_volume_metadata_item = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'meta': {'type': 'object'},
+        },
+        'additionalProperties': False,
+        'required': ['meta']
+    }
+}
+update_volume_metadata_item = copy.deepcopy(show_volume_metadata_item)
+delete_volume_metadata_item = {'status_code': [200]}
+
+update_volume_image_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {'metadata': {'type': 'object'}},
+        'additionalProperties': False,
+        'required': ['metadata']
+    }
+}
+delete_volume_image_metadata = {'status_code': [200]}
+show_volume_image_metadata = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'metadata': {'type': 'object'},
+        },
+        'additionalProperties': False,
+        'required': ['metadata']
+    }
+}
+
+unmanage_volume = {'status_code': [202]}
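
The schemas above are consumed by Tempest's volume service clients, which hand
the expected response schema to the REST layer for validation. A minimal
standalone sketch of the same idea (not part of this change; the simplified
schema and sample body below are illustrative stand-ins for
show_volume['response_body']):

import jsonschema

# Simplified stand-in; the real schema references parameter_types and is
# enforced by Tempest's RestClient after each API call.
simplified_show_volume = {
    'type': 'object',
    'properties': {
        'volume': {
            'type': 'object',
            'properties': {
                'id': {'type': 'string'},
                'size': {'type': 'integer'},
                'status': {'type': 'string'},
            },
            'required': ['id', 'size', 'status'],
        }
    },
    'required': ['volume'],
}

sample_body = {'volume': {'id': 'example-id', 'size': 1, 'status': 'available'}}
# Raises jsonschema.exceptions.ValidationError if the body does not match.
jsonschema.validate(sample_body, simplified_show_volume)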
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 8e6d3d5..8bdf98e 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -18,10 +18,9 @@
 import copy
 import datetime
 import re
+from urllib import parse as urlparse
 
 from oslo_log import log as logging
-import six
-from six.moves.urllib import parse as urlparse
 
 from tempest.lib import exceptions
 from tempest.lib.services.identity.v2 import token_client as json_v2id
@@ -65,8 +64,7 @@
     return url
 
 
-@six.add_metaclass(abc.ABCMeta)
-class AuthProvider(object):
+class AuthProvider(object, metaclass=abc.ABCMeta):
     """Provide authentication"""
 
     SCOPES = set(['project'])
@@ -391,7 +389,7 @@
         """
         if auth_data is None:
             auth_data = self.get_auth()
-        token, _auth_data = auth_data
+        _, _auth_data = auth_data
         service = filters.get('service')
         region = filters.get('region')
         name = filters.get('name')
@@ -428,7 +426,7 @@
 class KeystoneV3AuthProvider(KeystoneAuthProvider):
     """Provides authentication based on the Identity V3 API"""
 
-    SCOPES = set(['project', 'domain', 'unscoped', None])
+    SCOPES = set(['system', 'project', 'domain', 'unscoped', None])
 
     def _auth_client(self, auth_url):
         return json_v3id.V3TokenClient(
@@ -441,8 +439,8 @@
 
         Fields available in Credentials are passed to the token request,
         depending on the value of scope. Valid values for scope are: "project",
-        "domain". Any other string (e.g. "unscoped") or None will lead to an
-        unscoped token request.
+        "domain", or "system". Any other string (e.g. "unscoped") or None will
+        lead to an unscoped token request.
         """
 
         auth_params = dict(
@@ -465,12 +463,16 @@
                 domain_id=self.credentials.domain_id,
                 domain_name=self.credentials.domain_name)
 
+        if self.scope == 'system':
+            auth_params.update(system='all')
+
         return auth_params
 
     def _fill_credentials(self, auth_data_body):
-        # project or domain, depending on the scope
+        # project, domain, or system depending on the scope
         project = auth_data_body.get('project', None)
         domain = auth_data_body.get('domain', None)
+        system = auth_data_body.get('system', None)
         # user is always there
         user = auth_data_body['user']
         # Set project fields
@@ -490,6 +492,9 @@
                 self.credentials.domain_id = domain['id']
             if self.credentials.domain_name is None:
                 self.credentials.domain_name = domain['name']
+        # Set system scope
+        if system is not None:
+            self.credentials.system = 'all'
         # Set user fields
         if self.credentials.username is None:
             self.credentials.username = user['name']
@@ -524,7 +529,7 @@
         """
         if auth_data is None:
             auth_data = self.get_auth()
-        token, _auth_data = auth_data
+        _, _auth_data = auth_data
         service = filters.get('service')
         region = filters.get('region')
         name = filters.get('name')
@@ -677,14 +682,15 @@
                 raise exceptions.InvalidCredentials(msg)
         for key in attr:
             if key in self.ATTRIBUTES:
-                setattr(self, key, attr[key])
+                if attr[key] is not None:
+                    setattr(self, key, attr[key])
             else:
                 msg = '%s is not a valid attr for %s' % (key, self.__class__)
                 raise exceptions.InvalidCredentials(msg)
 
     def __str__(self):
         """Represent only attributes included in self.ATTRIBUTES"""
-        attrs = [attr for attr in self.ATTRIBUTES if attr is not 'password']
+        attrs = [attr for attr in self.ATTRIBUTES if attr != 'password']
         _repr = dict((k, getattr(self, k)) for k in attrs)
         return str(_repr)
 
@@ -741,7 +747,7 @@
 
     def __str__(self):
         """Represent only attributes included in self.ATTRIBUTES"""
-        attrs = [attr for attr in self.ATTRIBUTES if attr is not 'password']
+        attrs = [attr for attr in self.ATTRIBUTES if attr != 'password']
         _repr = dict((k, getattr(self, k)) for k in attrs)
         return str(_repr)
 
@@ -779,7 +785,7 @@
     ATTRIBUTES = ['domain_id', 'domain_name', 'password', 'username',
                   'project_domain_id', 'project_domain_name', 'project_id',
                   'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
-                  'user_domain_name', 'user_id']
+                  'user_domain_name', 'user_id', 'system']
     COLLISIONS = [('project_name', 'tenant_name'), ('project_id', 'tenant_id')]
 
     def __setattr__(self, key, value):
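
With the new 'system' scope, the v3 provider adds system='all' to the auth
parameters, which Keystone expresses in the token request body as
"scope": {"system": {"all": true}}. A small standalone sketch of how the scope
selection above maps to auth parameters (illustrative only; build_scope_params
is not part of the provider):

def build_scope_params(scope, project_id=None, domain_id=None):
    # Project and domain scopes pass their IDs; system scope requests 'all'.
    params = {}
    if scope == 'project' and project_id:
        params['project_id'] = project_id
    elif scope == 'domain' and domain_id:
        params['domain_id'] = domain_id
    elif scope == 'system':
        params['system'] = 'all'
    return params

print(build_scope_params('system'))                     # {'system': 'all'}
print(build_scope_params('project', project_id='abc'))  # {'project_id': 'abc'}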
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index d8c776b..c661d21 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -18,7 +18,6 @@
 import subprocess
 
 from oslo_log import log as logging
-import six
 
 from tempest.lib import base
 import tempest.lib.cli.output_parser
@@ -55,8 +54,6 @@
                     flags, action, params])
     cmd = cmd.strip()
     LOG.info("running: '%s'", cmd)
-    if six.PY2:
-        cmd = cmd.encode('utf-8')
     cmd = shlex.split(cmd)
     stdout = subprocess.PIPE
     stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
@@ -67,10 +64,7 @@
                                        cmd,
                                        result,
                                        result_err)
-    if six.PY2:
-        return result
-    else:
-        return os.fsdecode(result)
+    return os.fsdecode(result)
 
 
 class CLIClient(object):
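
The execute path above now always decodes the child process output with
os.fsdecode after dropping the Python 2 branches. A standalone sketch of that
pattern (not part of this change; run() is an illustrative helper and the
example command assumes a POSIX 'echo'):

import os
import shlex
import subprocess

def run(cmd_line, merge_stderr=False):
    cmd = shlex.split(cmd_line)
    stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr)
    result, _ = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError('command failed: %s' % cmd_line)
    return os.fsdecode(result)

print(run('echo hello'))  # 'hello\n'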
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 71ecb32..0ae11ca 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -16,19 +16,20 @@
 
 import argparse
 import ast
+import contextlib
 import importlib
 import inspect
 import os
 import sys
 import unittest
+import urllib.parse as urlparse
 import uuid
 
 from oslo_utils import uuidutils
-import six.moves.urllib.parse as urlparse
 
 DECORATOR_MODULE = 'decorators'
 DECORATOR_NAME = 'idempotent_id'
-DECORATOR_IMPORT = 'tempest.%s' % DECORATOR_MODULE
+DECORATOR_IMPORT = 'tempest.lib.%s' % DECORATOR_MODULE
 IMPORT_LINE = 'from tempest.lib import %s' % DECORATOR_MODULE
 DECORATOR_TEMPLATE = "@%s.%s('%%s')" % (DECORATOR_MODULE,
                                         DECORATOR_NAME)
@@ -180,34 +181,124 @@
         elif isinstance(node, ast.ImportFrom):
             return '%s.%s' % (node.module, node.names[0].name)
 
+    @contextlib.contextmanager
+    def ignore_site_packages_paths(self):
+        """Removes site-packages directories from the sys.path
+
+        Source:
+            - StackOverflow: https://stackoverflow.com/questions/22195382/
+            - Author: https://stackoverflow.com/users/485844/
+        """
+
+        paths = sys.path
+        # remove all third-party paths
+        # so that only stdlib imports will succeed
+        sys.path = list(filter(
+            None,
+            filter(lambda i: 'site-packages' not in i, sys.path)
+        ))
+        yield
+        sys.path = paths
+
+    def is_std_lib(self, module):
+        """Checks whether the module is part of the stdlib or not
+
+        Source:
+            - StackOverflow: https://stackoverflow.com/questions/22195382/
+            - Author: https://stackoverflow.com/users/485844/
+        """
+
+        if module in sys.builtin_module_names:
+            return True
+
+        with self.ignore_site_packages_paths():
+            imported_module = sys.modules.pop(module, None)
+            try:
+                importlib.import_module(module)
+            except ImportError:
+                return False
+            else:
+                return True
+            finally:
+                if imported_module:
+                    sys.modules[module] = imported_module
+
     def _add_import_for_test_uuid(self, patcher, src_parsed, source_path):
-        with open(source_path) as f:
-            src_lines = f.read().split('\n')
-        line_no = 0
-        tempest_imports = [node for node in src_parsed.body
+        import_list = [node for node in src_parsed.body
+                       if isinstance(node, (ast.Import, ast.ImportFrom))]
+
+        if not import_list:
+            print("(WARNING) %s: The file is not valid as it does not contain "
+                  "any import line! Therefore the import needed by "
+                  "@decorators.idempotent_id is not added!" % source_path)
+            return
+
+        tempest_imports = [node for node in import_list
                            if self._import_name(node) and
                            'tempest.' in self._import_name(node)]
-        if not tempest_imports:
-            import_snippet = '\n'.join(('', IMPORT_LINE, ''))
-        else:
-            for node in tempest_imports:
-                if self._import_name(node) < DECORATOR_IMPORT:
-                    continue
-                else:
-                    line_no = node.lineno
-                    import_snippet = IMPORT_LINE
-                    break
+
+        for node in tempest_imports:
+            if self._import_name(node) < DECORATOR_IMPORT:
+                continue
             else:
-                line_no = tempest_imports[-1].lineno
-                while True:
-                    if (not src_lines[line_no - 1] or
-                            getattr(self._next_node(src_parsed.body,
-                                                    tempest_imports[-1]),
-                                    'lineno') == line_no or
-                            line_no == len(src_lines)):
-                        break
-                    line_no += 1
-                import_snippet = '\n'.join((IMPORT_LINE, ''))
+                line_no = node.lineno
+                break
+        else:
+            if tempest_imports:
+                line_no = tempest_imports[-1].lineno + 1
+
+        # Insert import line between existing tempest imports
+        if tempest_imports:
+            patcher.add_patch(source_path, IMPORT_LINE, line_no)
+            return
+
+        # Group consecutive import lines together (gaps start a new group)
+        grouped_imports = {}
+        first_import_line = import_list[0].lineno
+        for idx, import_line in enumerate(import_list, first_import_line):
+            group_no = import_line.lineno - idx
+            group = grouped_imports.get(group_no, [])
+            group.append(import_line)
+            grouped_imports[group_no] = group
+
+        if len(grouped_imports) > 3:
+            print("(WARNING) %s: The file contains more than three import "
+                  "groups! This is not valid according to the PEP8 "
+                  "style guide. " % source_path)
+
+        # Divide grouped_imports into groups based on the PEP8 style guide
+        pep8_groups = {}
+        package_name = self.package.__name__.split(".")[0]
+        for key in grouped_imports:
+            module = self._import_name(grouped_imports[key][0]).split(".")[0]
+            if module.startswith(package_name):
+                group = pep8_groups.get('3rd_group', [])
+                pep8_groups['3rd_group'] = group + grouped_imports[key]
+            elif self.is_std_lib(module):
+                group = pep8_groups.get('1st_group', [])
+                pep8_groups['1st_group'] = group + grouped_imports[key]
+            else:
+                group = pep8_groups.get('2nd_group', [])
+                pep8_groups['2nd_group'] = group + grouped_imports[key]
+
+        for node in pep8_groups.get('2nd_group', []):
+            if self._import_name(node) < DECORATOR_IMPORT:
+                continue
+            else:
+                line_no = node.lineno
+                import_snippet = IMPORT_LINE
+                break
+        else:
+            if pep8_groups.get('2nd_group', []):
+                line_no = pep8_groups['2nd_group'][-1].lineno + 1
+                import_snippet = IMPORT_LINE
+            elif pep8_groups.get('1st_group', []):
+                line_no = pep8_groups['1st_group'][-1].lineno + 1
+                import_snippet = '\n' + IMPORT_LINE
+            else:
+                line_no = pep8_groups['3rd_group'][0].lineno
+                import_snippet = IMPORT_LINE + '\n\n'
+
         patcher.add_patch(source_path, import_snippet, line_no)
 
     def get_tests(self):
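
The grouping logic added above relies on the fact that, for a run of
consecutive import lines, lineno minus the enumeration index is constant, so
each blank-line-separated block of imports lands under its own key. A
standalone sketch of that invariant (not part of this change; the line numbers
are made up for illustration):

lines_with_imports = [1, 2, 3, 5, 6, 9]   # line numbers of import statements
groups = {}
for idx, lineno in enumerate(lines_with_imports, lines_with_imports[0]):
    # Consecutive line numbers keep (lineno - idx) constant; a gap bumps it.
    groups.setdefault(lineno - idx, []).append(lineno)
print(list(groups.values()))  # [[1, 2, 3], [5, 6], [9]]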
diff --git a/tempest/lib/cmd/skip_tracker.py b/tempest/lib/cmd/skip_tracker.py
index 87806b7..95376e3 100755
--- a/tempest/lib/cmd/skip_tracker.py
+++ b/tempest/lib/cmd/skip_tracker.py
@@ -31,10 +31,11 @@
 except ImportError:
     launchpad = None
 
-LPCACHEDIR = os.path.expanduser('~/.launchpadlib/cache')
+LPCACHEDIR = os.path.expanduser(os.path.join('~', '.launchpadlib', 'cache'))
 LOG = logging.getLogger(__name__)
 
-BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..'))
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                       '..', '..', '..'))
 TESTDIR = os.path.join(BASEDIR, 'tempest')
 
 
diff --git a/tempest/lib/common/api_version_utils.py b/tempest/lib/common/api_version_utils.py
index 80dbc1d..db5c8c3 100644
--- a/tempest/lib/common/api_version_utils.py
+++ b/tempest/lib/common/api_version_utils.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
 import testtools
 
 from tempest.lib.common import api_version_request
@@ -117,7 +116,7 @@
     :param response_header: Response header where microversion is
             expected to be present.
     """
-    if not isinstance(api_microversion, six.string_types):
+    if not isinstance(api_microversion, str):
         raise TypeError('api_microversion must be a string')
     api_microversion_header_name = api_microversion_header_name.lower()
     if (api_microversion_header_name not in response_header or
diff --git a/tempest/lib/common/cred_client.py b/tempest/lib/common/cred_client.py
index a81f53c..f13d6d0 100644
--- a/tempest/lib/common/cred_client.py
+++ b/tempest/lib/common/cred_client.py
@@ -13,7 +13,6 @@
 import abc
 
 from oslo_log import log as logging
-import six
 
 from tempest.lib import auth
 from tempest.lib import exceptions as lib_exc
@@ -22,8 +21,7 @@
 LOG = logging.getLogger(__name__)
 
 
-@six.add_metaclass(abc.ABCMeta)
-class CredsClient(object):
+class CredsClient(object, metaclass=abc.ABCMeta):
     """This class is a wrapper around the identity clients
 
      to provide a single interface for managing credentials in both v2 and v3
@@ -39,11 +37,15 @@
         self.projects_client = projects_client
         self.roles_client = roles_client
 
-    def create_user(self, username, password, project, email):
+    def create_user(self, username, password, project=None, email=None):
         params = {'name': username,
-                  'password': password,
-                  self.project_id_param: project['id'],
-                  'email': email}
+                  'password': password}
+        # with keystone v3, a default project is not required
+        if project:
+            params[self.project_id_param] = project['id']
+        # email is not a first-class attribute of a user
+        if email:
+            params['email'] = email
         user = self.users_client.create_user(**params)
         if 'user' in user:
             user = user['user']
@@ -83,12 +85,15 @@
                       role['id'], project['id'], user['id'])
 
     @abc.abstractmethod
-    def get_credentials(self, user, project, password):
+    def get_credentials(
+            self, user, project, password, domain=None, system=None):
         """Produces a Credentials object from the details provided
 
         :param user: a user dict
-        :param project: a project dict
+        :param project: a project dict or None if using domain or system scope
         :param password: the password as a string
+        :param domain: a domain dict
+        :param system: a system dict
         :return: a Credentials object with all the available credential details
         """
         pass
@@ -116,7 +121,8 @@
     def delete_project(self, project_id):
         self.projects_client.delete_tenant(project_id)
 
-    def get_credentials(self, user, project, password):
+    def get_credentials(
+        self, user, project, password, domain=None, system=None):
         # User and project already include both ID and name here,
         # so there's no need to use the fill_in mode
         return auth.get_credentials(
@@ -156,23 +162,62 @@
     def delete_project(self, project_id):
         self.projects_client.delete_project(project_id)
 
-    def get_credentials(self, user, project, password):
+    def create_domain(self, name, description):
+        domain = self.domains_client.create_domain(
+            name=name, description=description)['domain']
+        return domain
+
+    def delete_domain(self, domain_id):
+        self.domains_client.update_domain(domain_id, enabled=False)
+        self.domains_client.delete_domain(domain_id)
+
+    def create_user(self, username, password, project=None, email=None,
+                    domain_id=None):
+        params = {'name': username,
+                  'password': password,
+                  'domain_id': domain_id or self.creds_domain['id']}
+        # with keystone v3, a default project is not required
+        if project:
+            params[self.project_id_param] = project['id']
+        # email is not a first-class attribute of a user
+        if email:
+            params['email'] = email
+        user = self.users_client.create_user(**params)
+        if 'user' in user:
+            user = user['user']
+        return user
+
+    def get_credentials(
+            self, user, project, password, domain=None, system=None):
         # User, project and domain already include both ID and name here,
         # so there's no need to use the fill_in mode.
         # NOTE(andreaf) We need to set all fields in the returned credentials.
         # Scope is then used to pick only those relevant for the type of
         # token needed by each service client.
+        if project:
+            project_name = project['name']
+            project_id = project['id']
+        else:
+            project_name = None
+            project_id = None
+        if domain:
+            domain_name = domain['name']
+            domain_id = domain['id']
+        else:
+            domain_name = self.creds_domain['name']
+            domain_id = self.creds_domain['id']
         return auth.get_credentials(
             auth_url=None,
             fill_in=False,
             identity_version='v3',
             username=user['name'], user_id=user['id'],
-            project_name=project['name'], project_id=project['id'],
+            project_name=project_name, project_id=project_id,
             password=password,
             project_domain_id=self.creds_domain['id'],
             project_domain_name=self.creds_domain['name'],
-            domain_id=self.creds_domain['id'],
-            domain_name=self.creds_domain['name'])
+            domain_id=domain_id,
+            domain_name=domain_name,
+            system=system)
 
     def assign_user_role_on_domain(self, user, role_name, domain=None):
         """Assign the specified role on a domain
@@ -197,6 +242,23 @@
             LOG.debug("Role %s already assigned on domain %s for user %s",
                       role['id'], domain['id'], user['id'])
 
+    def assign_user_role_on_system(self, user, role_name):
+        """Assign the specified role on the system
+
+        :param user: a user dict
+        :param role_name: name of the role to be assigned
+        """
+        role = self._check_role_exists(role_name)
+        if not role:
+            msg = 'No "%s" role found' % role_name
+            raise lib_exc.NotFound(msg)
+        try:
+            self.roles_client.create_user_role_on_system(
+                user['id'], role['id'])
+        except lib_exc.Conflict:
+            LOG.debug("Role %s already assigned on the system for user %s",
+                      role['id'], user['id'])
+
 
 def get_creds_client(identity_client,
                      projects_client,
diff --git a/tempest/lib/common/cred_provider.py b/tempest/lib/common/cred_provider.py
index 42ed41b..2da206f 100644
--- a/tempest/lib/common/cred_provider.py
+++ b/tempest/lib/common/cred_provider.py
@@ -13,15 +13,15 @@
 #    limitations under the License.
 
 import abc
-
-import six
+from oslo_log import log as logging
 
 from tempest.lib import auth
 from tempest.lib import exceptions
 
+LOG = logging.getLogger(__name__)
 
-@six.add_metaclass(abc.ABCMeta)
-class CredentialProvider(object):
+
+class CredentialProvider(object, metaclass=abc.ABCMeta):
     def __init__(self, identity_version, name=None,
                  network_resources=None, credentials_domain=None,
                  admin_role=None, identity_uri=None):
@@ -60,6 +60,54 @@
         return
 
     @abc.abstractmethod
+    def get_system_admin_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_system_member_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_system_reader_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_domain_admin_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_domain_member_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_domain_reader_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_admin_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_alt_admin_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_member_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_alt_member_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_reader_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_alt_reader_creds(self):
+        return
+
+    @abc.abstractmethod
     def clear_creds(self):
         return
 
@@ -72,13 +120,25 @@
         return
 
     @abc.abstractmethod
-    def get_creds_by_roles(self, roles, force_new=False):
+    def get_creds_by_roles(self, roles, force_new=False, scope=None):
         return
 
     @abc.abstractmethod
     def is_role_available(self, role):
         return
 
+    def cleanup_default_secgroup(self, security_group_client, tenant):
+        resp_body = security_group_client.list_security_groups(
+            tenant_id=tenant,
+            name="default")
+        secgroups_to_delete = resp_body['security_groups']
+        for secgroup in secgroups_to_delete:
+            try:
+                security_group_client.delete_security_group(secgroup['id'])
+            except exceptions.NotFound:
+                LOG.warning('Security group %s, id %s not found for clean-up',
+                            secgroup['name'], secgroup['id'])
+
 
 class TestResources(object):
     """Readonly Credentials, with network resources added."""
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index f27e926..be8c0e8 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -16,7 +16,6 @@
 
 import netaddr
 from oslo_log import log as logging
-import six
 
 from tempest.lib.common import cred_client
 from tempest.lib.common import cred_provider
@@ -142,7 +141,14 @@
         else:
             # We use a dedicated client manager for identity client in case we
             # need a different token scope for them.
-            scope = 'domain' if self.identity_admin_domain_scope else 'project'
+            if self.default_admin_creds.system:
+                scope = 'system'
+            elif (self.identity_admin_domain_scope and
+                  (self.default_admin_creds.domain_id or
+                   self.default_admin_creds.domain_name)):
+                scope = 'domain'
+            else:
+                scope = 'project'
             identity_os = clients.ServiceClients(self.default_admin_creds,
                                                  self.identity_uri,
                                                  scope=scope)
@@ -157,62 +163,101 @@
                     os.network.PortsClient(),
                     os.network.SecurityGroupsClient())
 
-    def _create_creds(self, admin=False, roles=None):
+    def _create_creds(self, admin=False, roles=None, scope='project'):
         """Create credentials with random name.
 
-        Creates project and user. When admin flag is True create user
-        with admin role. Assign user with additional roles (for example
-        _member_) and roles requested by caller.
+        Creates a user and role assignments on a project, domain, or system.
+        When the admin flag is True, the user is given the admin role on the
+        resource. If roles are provided, those roles are assigned on the
+        resource; otherwise the user gets the 'member' role on the resource.
 
         :param admin: Flag if to assign to the user admin role
         :type admin: bool
         :param roles: Roles to assign for the user
         :type roles: list
+        :param str scope: The scope for the role assignment, may be one of
+                          'project', 'domain', or 'system'.
         :return: Readonly Credentials with network resources
+        :raises: Exception if scope is invalid
         """
+        if not roles:
+            roles = []
         root = self.name
 
-        project_name = data_utils.rand_name(root, prefix=self.resource_prefix)
-        project_desc = project_name + "-desc"
-        project = self.creds_client.create_project(
-            name=project_name, description=project_desc)
+        cred_params = {
+            'project': None,
+            'domain': None,
+            'system': None
+        }
+        if scope == 'project':
+            project_name = data_utils.rand_name(
+                root, prefix=self.resource_prefix)
+            project_desc = project_name + '-desc'
+            project = self.creds_client.create_project(
+                name=project_name, description=project_desc)
 
-        # NOTE(andreaf) User and project can be distinguished from the context,
-        # having the same ID in both makes it easier to match them and debug.
-        username = project_name
-        user_password = data_utils.rand_password()
-        email = data_utils.rand_name(
-            root, prefix=self.resource_prefix) + "@example.com"
-        user = self.creds_client.create_user(
-            username, user_password, project, email)
-        role_assigned = False
+            # NOTE(andreaf) User and project can be distinguished from the
+            # context, having the same ID in both makes it easier to match them
+            # and debug.
+            username = project_name + '-project'
+            cred_params['project'] = project
+        elif scope == 'domain':
+            domain_name = data_utils.rand_name(
+                root, prefix=self.resource_prefix)
+            domain_desc = domain_name + '-desc'
+            domain = self.creds_client.create_domain(
+                name=domain_name, description=domain_desc)
+            username = domain_name + '-domain'
+            cred_params['domain'] = domain
+        elif scope == 'system':
+            prefix = data_utils.rand_name(root, prefix=self.resource_prefix)
+            username = prefix + '-system'
+            cred_params['system'] = 'all'
+        else:
+            raise lib_exc.InvalidScopeType(scope=scope)
         if admin:
-            self.creds_client.assign_user_role(user, project, self.admin_role)
-            role_assigned = True
+            username += '-admin'
+        elif roles and len(roles) == 1:
+            username += '-' + roles[0]
+        user_password = data_utils.rand_password()
+        cred_params['password'] = user_password
+        user = self.creds_client.create_user(
+            username, user_password)
+        cred_params['user'] = user
+        roles_to_assign = [r for r in roles]
+        if admin:
+            roles_to_assign.append(self.admin_role)
+            if scope == 'project':
+                self.creds_client.assign_user_role(
+                    user, project, self.identity_admin_role)
             if (self.identity_version == 'v3' and
                     self.identity_admin_domain_scope):
                 self.creds_client.assign_user_role_on_domain(
                     user, self.identity_admin_role)
         # Add roles specified in config file
-        for conf_role in self.extra_roles:
-            self.creds_client.assign_user_role(user, project, conf_role)
-            role_assigned = True
-        # Add roles requested by caller
-        if roles:
-            for role in roles:
-                self.creds_client.assign_user_role(user, project, role)
-                role_assigned = True
+        roles_to_assign.extend(self.extra_roles)
+        # If there are still no roles, default to 'member'
         # NOTE(mtreinish) For a user to have access to a project with v3 auth
         # it must be assigned a role on the project. So we need to ensure
         # our newly created user has a role on the newly created project.
-        if self.identity_version == 'v3' and not role_assigned:
+        if not roles_to_assign and self.identity_version == 'v3':
+            roles_to_assign = ['member']
             try:
-                self.creds_client.create_user_role('Member')
+                self.creds_client.create_user_role('member')
             except lib_exc.Conflict:
-                LOG.warning('Member role already exists, ignoring conflict.')
-            self.creds_client.assign_user_role(user, project, 'Member')
+                LOG.warning('member role already exists, ignoring conflict.')
+        for role in roles_to_assign:
+            if scope == 'project':
+                self.creds_client.assign_user_role(user, project, role)
+            elif scope == 'domain':
+                self.creds_client.assign_user_role_on_domain(
+                    user, role, domain)
+            elif scope == 'system':
+                self.creds_client.assign_user_role_on_system(user, role)
+        LOG.info("Dynamic test user %s is created with scope %s and roles: %s",
+                 user['id'], scope, roles_to_assign)
 
-        creds = self.creds_client.get_credentials(user, project, user_password)
+        creds = self.creds_client.get_credentials(**cred_params)
         return cred_provider.TestResources(creds)
 
     def _create_network_resources(self, tenant_id):
@@ -296,7 +341,7 @@
                             tenant_id=tenant_id,
                             enable_dhcp=self.network_resources['dhcp'],
                             ip_version=(ipaddress.ip_network(
-                                six.text_type(subnet_cidr)).version))
+                                str(subnet_cidr)).version))
                 else:
                     resp_body = self.subnets_admin_client.\
                         create_subnet(network_id=network_id,
@@ -304,7 +349,7 @@
                                       name=subnet_name,
                                       tenant_id=tenant_id,
                                       ip_version=(ipaddress.ip_network(
-                                          six.text_type(subnet_cidr)).version))
+                                          str(subnet_cidr)).version))
                 break
             except lib_exc.BadRequest as e:
                 if 'overlaps with another subnet' not in str(e):
@@ -327,50 +372,127 @@
         self.routers_admin_client.add_router_interface(router_id,
                                                        subnet_id=subnet_id)
 
-    def get_credentials(self, credential_type):
-        if self._creds.get(str(credential_type)):
+    def get_credentials(self, credential_type, scope=None):
+        if not scope and self._creds.get(str(credential_type)):
             credentials = self._creds[str(credential_type)]
+        elif scope and (
+                self._creds.get("%s_%s" % (scope, str(credential_type)))):
+            credentials = self._creds["%s_%s" % (scope, str(credential_type))]
         else:
-            if credential_type in ['primary', 'alt', 'admin']:
+            LOG.debug("Creating new dynamic creds for scope: %s and "
+                      "credential_type: %s", scope, credential_type)
+            if scope:
+                if credential_type in [['admin'], ['alt_admin']]:
+                    credentials = self._create_creds(
+                        admin=True, scope=scope)
+                elif credential_type in [['alt_member'], ['alt_reader']]:
+                    cred_type = credential_type[0][4:]
+                    if isinstance(cred_type, str):
+                        cred_type = [cred_type]
+                    credentials = self._create_creds(
+                        roles=cred_type, scope=scope)
+                else:
+                    credentials = self._create_creds(
+                        roles=credential_type, scope=scope)
+            elif credential_type in ['primary', 'alt', 'admin']:
                 is_admin = (credential_type == 'admin')
                 credentials = self._create_creds(admin=is_admin)
             else:
                 credentials = self._create_creds(roles=credential_type)
-            self._creds[str(credential_type)] = credentials
+            if scope:
+                self._creds["%s_%s" %
+                            (scope, str(credential_type))] = credentials
+            else:
+                self._creds[str(credential_type)] = credentials
             # Maintained until tests are ported
             LOG.info("Acquired dynamic creds:\n"
                      " credentials: %s", credentials)
-            if (self.neutron_available and self.create_networks):
-                network, subnet, router = self._create_network_resources(
-                    credentials.tenant_id)
-                credentials.set_resources(network=network, subnet=subnet,
-                                          router=router)
-                LOG.info("Created isolated network resources for:\n"
-                         " credentials: %s", credentials)
+            # NOTE(gmann): For 'domain' and 'system' scoped tokens, there is
+            # no project_id, so we skip the network creation for both scopes.
+            # How these scoped tokens can create networks, Nova servers, or
+            # other project-mapped resources is an open question that was
+            # discussed at length during the Xena cycle PTG. Once that is
+            # sorted out, we can update the network creation here if needed.
+            if (not scope or scope == 'project'):
+                if (self.neutron_available and self.create_networks):
+                    network, subnet, router = self._create_network_resources(
+                        credentials.tenant_id)
+                    credentials.set_resources(network=network, subnet=subnet,
+                                              router=router)
+                    LOG.info("Created isolated network resources for:\n"
+                             " credentials: %s", credentials)
+            else:
+                LOG.info("Network resources are not created for scope: %s",
+                         scope)
         return credentials
 
+    # TODO(gmann): Remove this method in favor of get_project_member_creds()
+    # after the deprecation phase.
     def get_primary_creds(self):
         return self.get_credentials('primary')
 
+    # TODO(gmann): Remove this method in favor of get_project_admin_creds()
+    # after the deprecation phase.
     def get_admin_creds(self):
         return self.get_credentials('admin')
 
+    # TODO(gmann): Replace this method with more appropriate name.
+    # like get_project_alt_member_creds()
     def get_alt_creds(self):
         return self.get_credentials('alt')
 
-    def get_creds_by_roles(self, roles, force_new=False):
+    def get_system_admin_creds(self):
+        return self.get_credentials(['admin'], scope='system')
+
+    def get_system_member_creds(self):
+        return self.get_credentials(['member'], scope='system')
+
+    def get_system_reader_creds(self):
+        return self.get_credentials(['reader'], scope='system')
+
+    def get_domain_admin_creds(self):
+        return self.get_credentials(['admin'], scope='domain')
+
+    def get_domain_member_creds(self):
+        return self.get_credentials(['member'], scope='domain')
+
+    def get_domain_reader_creds(self):
+        return self.get_credentials(['reader'], scope='domain')
+
+    def get_project_admin_creds(self):
+        return self.get_credentials(['admin'], scope='project')
+
+    def get_project_alt_admin_creds(self):
+        return self.get_credentials(['alt_admin'], scope='project')
+
+    def get_project_member_creds(self):
+        return self.get_credentials(['member'], scope='project')
+
+    def get_project_alt_member_creds(self):
+        return self.get_credentials(['alt_member'], scope='project')
+
+    def get_project_reader_creds(self):
+        return self.get_credentials(['reader'], scope='project')
+
+    def get_project_alt_reader_creds(self):
+        return self.get_credentials(['alt_reader'], scope='project')
+
+    def get_creds_by_roles(self, roles, force_new=False, scope=None):
         roles = list(set(roles))
         # The roles list as a str will become the index as the dict key for
         # the created credentials set in the dynamic_creds dict.
-        exist_creds = self._creds.get(str(roles))
+        creds_name = str(roles)
+        if scope:
+            creds_name = "%s_%s" % (scope, str(roles))
+        exist_creds = self._creds.get(creds_name)
         # If force_new flag is True 2 cred sets with the same roles are needed
         # handle this by creating a separate index for old one to store it
         # separately for cleanup
         if exist_creds and force_new:
-            new_index = str(roles) + '-' + str(len(self._creds))
+            new_index = creds_name + '-' + str(len(self._creds))
             self._creds[new_index] = exist_creds
-            del self._creds[str(roles)]
-        return self.get_credentials(roles)
+            del self._creds[creds_name]
+        return self.get_credentials(roles, scope=scope)
 
     def _clear_isolated_router(self, router_id, router_name):
         client = self.routers_admin_client
@@ -396,18 +518,6 @@
             LOG.warning('network with name: %s not found for delete',
                         network_name)
 
-    def _cleanup_default_secgroup(self, tenant):
-        nsg_client = self.security_groups_admin_client
-        resp_body = nsg_client.list_security_groups(tenant_id=tenant,
-                                                    name="default")
-        secgroups_to_delete = resp_body['security_groups']
-        for secgroup in secgroups_to_delete:
-            try:
-                nsg_client.delete_security_group(secgroup['id'])
-            except lib_exc.NotFound:
-                LOG.warning('Security group %s, id %s not found for clean-up',
-                            secgroup['name'], secgroup['id'])
-
     def _clear_isolated_net_resources(self):
         client = self.routers_admin_client
         for cred in self._creds:
@@ -443,20 +553,21 @@
         if not self._creds:
             return
         self._clear_isolated_net_resources()
-        for creds in six.itervalues(self._creds):
+        for creds in self._creds.values():
             try:
                 self.creds_client.delete_user(creds.user_id)
             except lib_exc.NotFound:
                 LOG.warning("user with name: %s not found for delete",
                             creds.username)
             # NOTE(zhufl): Only when neutron's security_group ext is
-            # enabled, _cleanup_default_secgroup will not raise error. But
+            # enabled, cleanup_default_secgroup will not raise error. But
             # here cannot use test_utils.is_extension_enabled for it will cause
             # "circular dependency". So here just use try...except to
             # ensure tenant deletion without big changes.
             try:
                 if self.neutron_available:
-                    self._cleanup_default_secgroup(creds.tenant_id)
+                    self.cleanup_default_secgroup(
+                        self.security_groups_admin_client, creds.tenant_id)
             except lib_exc.NotFound:
                 LOG.warning("failed to cleanup tenant %s's secgroup",
                             creds.tenant_name)
@@ -465,6 +576,16 @@
             except lib_exc.NotFound:
                 LOG.warning("tenant with name: %s not found for delete",
                             creds.tenant_name)
+
+            # if cred is domain scoped, delete ephemeral domain
+            # do not delete default domain
+            if (hasattr(creds, 'domain_id') and
+                    creds.domain_id != creds.project_domain_id):
+                try:
+                    self.creds_client.delete_domain(creds.domain_id)
+                except lib_exc.NotFound:
+                    LOG.warning("domain with name: %s not found for delete",
+                                creds.domain_name)
         self._creds = {}
 
     def is_multi_user(self):
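
Scoped credential sets are now cached under a "<scope>_<type>" key, so
project-scoped and system-scoped admin credentials never collide in
self._creds. A standalone sketch of that keying scheme (not part of this
change; creds_cache_key is an illustrative helper):

def creds_cache_key(credential_type, scope=None):
    if scope:
        return "%s_%s" % (scope, str(credential_type))
    return str(credential_type)

assert creds_cache_key('admin') == 'admin'
assert creds_cache_key(['admin'], scope='system') == "system_['admin']"
assert creds_cache_key(['member'], scope='project') == "project_['member']"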
diff --git a/tempest/lib/common/http.py b/tempest/lib/common/http.py
index 8c1a802..33f871b 100644
--- a/tempest/lib/common/http.py
+++ b/tempest/lib/common/http.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
 import urllib3
 
 
@@ -89,7 +88,7 @@
                 for key, value in info.getheaders().items():
                     # We assume HTTP header name to be string, not random
                     # bytes, thus ensure we have string keys.
-                    self[six.u(key).lower()] = value
+                    self[str(key).lower()] = value
                 self.status = info.status
                 self['status'] = str(self.status)
                 self.reason = info.reason
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index bbf5e89..1618175 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -15,7 +15,6 @@
 import jsonschema
 from oslo_serialization import base64
 from oslo_utils import timeutils
-import six
 
 # JSON Schema validator and format checker used for JSON Schema validation
 JSONSCHEMA_VALIDATOR = jsonschema.Draft4Validator
@@ -32,7 +31,7 @@
 @FORMAT_CHECKER.checks('iso8601-date-time')
 def _validate_datetime_format(instance):
     try:
-        if isinstance(instance, jsonschema.compat.str_types):
+        if instance is not None:
             timeutils.parse_isotime(instance)
     except ValueError:
         return False
@@ -43,7 +42,7 @@
 @jsonschema.FormatChecker.cls_checks('base64')
 def _validate_base64_format(instance):
     try:
-        if isinstance(instance, six.text_type):
+        if isinstance(instance, str):
             instance = instance.encode('utf-8')
         base64.decode_as_bytes(instance)
     except TypeError:
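
The checkers above hook custom 'iso8601-date-time' and 'base64' formats into
jsonschema; format validation only applies when a FormatChecker is passed to
the validator. A standalone sketch of registering and using a custom format
(not part of this change; the checker below uses the stdlib base64 module
rather than oslo_serialization):

import base64
import jsonschema

checker = jsonschema.FormatChecker()

@checker.checks('base64')
def _is_base64(instance):
    # Only strings are checked; other types are left to the 'type' keyword.
    if not isinstance(instance, str):
        return True
    try:
        base64.b64decode(instance.encode('utf-8'), validate=True)
    except Exception:
        return False
    return True

schema = {'type': 'string', 'format': 'base64'}
jsonschema.validate('aGVsbG8=', schema, format_checker=checker)  # passes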
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index 1011504..6d948cf 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -12,12 +12,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import hashlib
 import os
 
 from oslo_concurrency import lockutils
 from oslo_log import log as logging
-import six
+from oslo_utils.secretutils import md5
 import yaml
 
 from tempest.lib import auth
@@ -104,15 +103,24 @@
         return hash_dict
 
     @classmethod
+    def _append_scoped_role(cls, scope, role, account_hash, hash_dict):
+        key = "%s_%s" % (scope, role)
+        hash_dict['scoped_roles'].setdefault(key, [])
+        hash_dict['scoped_roles'][key].append(account_hash)
+        return hash_dict
+
+    @classmethod
     def get_hash_dict(cls, accounts, admin_role,
                       object_storage_operator_role=None,
                       object_storage_reseller_admin_role=None):
-        hash_dict = {'roles': {}, 'creds': {}, 'networks': {}}
+        hash_dict = {'roles': {}, 'creds': {}, 'networks': {},
+                     'scoped_roles': {}}
 
         # Loop over the accounts read from the yaml file
         for account in accounts:
             roles = []
             types = []
+            scope = None
             resources = []
             if 'roles' in account:
                 roles = account.pop('roles')
@@ -120,15 +128,24 @@
                 types = account.pop('types')
             if 'resources' in account:
                 resources = account.pop('resources')
-            temp_hash = hashlib.md5()
+            if 'project_name' in account:
+                scope = 'project'
+            elif 'domain_name' in account:
+                scope = 'domain'
+            elif 'system' in account:
+                scope = 'system'
+            temp_hash = md5(usedforsecurity=False)
             account_for_hash = dict((k, v) for (k, v) in account.items()
                                     if k in cls.HASH_CRED_FIELDS)
-            temp_hash.update(six.text_type(account_for_hash).encode('utf-8'))
+            temp_hash.update(str(account_for_hash).encode('utf-8'))
             temp_hash_key = temp_hash.hexdigest()
             hash_dict['creds'][temp_hash_key] = account
             for role in roles:
                 hash_dict = cls._append_role(role, temp_hash_key,
                                              hash_dict)
+                if scope:
+                    hash_dict = cls._append_scoped_role(
+                        scope, role, temp_hash_key, hash_dict)
             # If types are set for the account append the matching role
             # subdict with the hash
             for type in types:
@@ -172,7 +189,7 @@
         return self.is_multi_user()
 
     def _create_hash_file(self, hash_string):
-        path = os.path.join(os.path.join(self.accounts_dir, hash_string))
+        path = os.path.join(self.accounts_dir, hash_string)
         if not os.path.isfile(path):
             with open(path, 'w') as fd:
                 fd.write(self.name)
@@ -194,25 +211,32 @@
             if res:
                 return _hash
             else:
-                path = os.path.join(os.path.join(self.accounts_dir,
-                                                 _hash))
+                path = os.path.join(self.accounts_dir, _hash)
                 with open(path, 'r') as fd:
                     names.append(fd.read())
         msg = ('Insufficient number of users provided. %s have allocated all '
                'the credentials for this allocation request' % ','.join(names))
         raise lib_exc.InvalidCredentials(msg)
 
-    def _get_match_hash_list(self, roles=None):
+    def _get_match_hash_list(self, roles=None, scope=None):
         hashes = []
         if roles:
             # Loop over all the creds for each role in the subdict and generate
             # a list of cred lists for each role
             for role in roles:
-                temp_hashes = self.hash_dict['roles'].get(role, None)
-                if not temp_hashes:
-                    raise lib_exc.InvalidCredentials(
-                        "No credentials with role: %s specified in the "
-                        "accounts ""file" % role)
+                if scope:
+                    key = "%s_%s" % (scope, role)
+                    temp_hashes = self.hash_dict['scoped_roles'].get(key)
+                    if not temp_hashes:
+                        raise lib_exc.InvalidCredentials(
+                            "No credentials matching role: %s, scope: %s "
+                            "specified in the accounts file" % (role, scope))
+                else:
+                    temp_hashes = self.hash_dict['roles'].get(role, None)
+                    if not temp_hashes:
+                        raise lib_exc.InvalidCredentials(
+                            "No credentials with role: %s specified in the "
+                            "accounts file" % role)
                 hashes.append(temp_hashes)
             # Take the list of lists and do a boolean and between each list to
             # find the creds which fall under all the specified roles
@@ -240,8 +264,8 @@
         temp_creds.pop('password')
         return temp_creds
 
-    def _get_creds(self, roles=None):
-        useable_hashes = self._get_match_hash_list(roles)
+    def _get_creds(self, roles=None, scope=None):
+        useable_hashes = self._get_match_hash_list(roles, scope)
         if not useable_hashes:
             msg = 'No users configured for type/roles %s' % roles
             raise lib_exc.InvalidCredentials(msg)
@@ -283,6 +307,8 @@
         self.remove_hash(_hash)
         LOG.info("%s returned allocated creds:\n%s", self.name, clean_creds)
 
+    # TODO(gmann): Remove this method in favor of get_project_member_creds()
+    # after the deprecation phase.
     def get_primary_creds(self):
         if self._creds.get('primary'):
             return self._creds.get('primary')
@@ -290,6 +316,8 @@
         self._creds['primary'] = net_creds
         return net_creds
 
+    # TODO(gmann): Rename this method to something more appropriate,
+    # like get_project_alt_member_creds().
     def get_alt_creds(self):
         if self._creds.get('alt'):
             return self._creds.get('alt')
@@ -297,9 +325,84 @@
         self._creds['alt'] = net_creds
         return net_creds
 
-    def get_creds_by_roles(self, roles, force_new=False):
+    def get_system_admin_creds(self):
+        if self._creds.get('system_admin'):
+            return self._creds.get('system_admin')
+        system_admin = self._get_creds(['admin'], scope='system')
+        self._creds['system_admin'] = system_admin
+        return system_admin
+
+    def get_system_member_creds(self):
+        if self._creds.get('system_member'):
+            return self._creds.get('system_member')
+        system_member = self._get_creds(['member'], scope='system')
+        self._creds['system_member'] = system_member
+        return system_member
+
+    def get_system_reader_creds(self):
+        if self._creds.get('system_reader'):
+            return self._creds.get('system_reader')
+        system_reader = self._get_creds(['reader'], scope='system')
+        self._creds['system_reader'] = system_reader
+        return system_reader
+
+    def get_domain_admin_creds(self):
+        if self._creds.get('domain_admin'):
+            return self._creds.get('domain_admin')
+        domain_admin = self._get_creds(['admin'], scope='domain')
+        self._creds['domain_admin'] = domain_admin
+        return domain_admin
+
+    def get_domain_member_creds(self):
+        if self._creds.get('domain_member'):
+            return self._creds.get('domain_member')
+        domain_member = self._get_creds(['member'], scope='domain')
+        self._creds['domain_member'] = domain_member
+        return domain_member
+
+    def get_domain_reader_creds(self):
+        if self._creds.get('domain_reader'):
+            return self._creds.get('domain_reader')
+        domain_reader = self._get_creds(['reader'], scope='domain')
+        self._creds['domain_reader'] = domain_reader
+        return domain_reader
+
+    def get_project_admin_creds(self):
+        if self._creds.get('project_admin'):
+            return self._creds.get('project_admin')
+        project_admin = self._get_creds(['admin'], scope='project')
+        self._creds['project_admin'] = project_admin
+        return project_admin
+
+    def get_project_alt_admin_creds(self):
+        # TODO(gmann): Implement alt admin hash.
+        return
+
+    def get_project_member_creds(self):
+        if self._creds.get('project_member'):
+            return self._creds.get('project_member')
+        project_member = self._get_creds(['member'], scope='project')
+        self._creds['project_member'] = project_member
+        return project_member
+
+    def get_project_alt_member_creds(self):
+        # TODO(gmann): Implement alt member hash.
+        return
+
+    def get_project_reader_creds(self):
+        if self._creds.get('project_reader'):
+            return self._creds.get('project_reader')
+        project_reader = self._get_creds(['reader'], scope='project')
+        self._creds['project_reader'] = project_reader
+        return project_reader
+
+    def get_project_alt_reader_creds(self):
+        # TODO(gmann): Implement alt reader hash.
+        return
+
+    def get_creds_by_roles(self, roles, force_new=False, scope=None):
         roles = list(set(roles))
-        exist_creds = self._creds.get(six.text_type(roles).encode(
+        exist_creds = self._creds.get(str(roles).encode(
             'utf-8'), None)
         # The force kwarg is used to allocate an additional set of creds with
         # the same role list. The index used for the previously allocation
@@ -309,17 +412,19 @@
         elif exist_creds and force_new:
             # NOTE(andreaf) In py3.x encode returns bytes, and b'' is bytes
             # In py2.7 encode returns strings, and b'' is still string
-            new_index = six.text_type(roles).encode('utf-8') + b'-' + \
-                six.text_type(len(self._creds)).encode('utf-8')
+            new_index = str(roles).encode('utf-8') + b'-' + \
+                str(len(self._creds)).encode('utf-8')
             self._creds[new_index] = exist_creds
         net_creds = self._get_creds(roles=roles)
-        self._creds[six.text_type(roles).encode('utf-8')] = net_creds
+        self._creds[str(roles).encode('utf-8')] = net_creds
         return net_creds
 
     def clear_creds(self):
         for creds in self._creds.values():
             self.remove_credentials(creds)
 
+    # TODO(gmann): Remove this method in favor of get_project_admin_creds()
+    # after the deprecation phase.
     def get_admin_creds(self):
         return self.get_creds_by_roles([self.admin_role])
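
For illustration only (not part of the change): a minimal, simplified sketch of how the new 'scoped_roles' bucket built by get_hash_dict() is keyed and how the scoped getters consume it. The account entries are placeholders, and the sketch hashes the whole remaining account dict rather than only HASH_CRED_FIELDS.

    from oslo_utils.secretutils import md5

    # Hypothetical pre-provisioned accounts, mirroring accounts.yaml entries.
    accounts = [
        {'username': 'user1', 'project_name': 'proj1', 'roles': ['member']},
        {'username': 'user2', 'domain_name': 'dom1', 'roles': ['admin']},
    ]

    hash_dict = {'roles': {}, 'creds': {}, 'networks': {}, 'scoped_roles': {}}
    for account in accounts:
        roles = account.pop('roles', [])
        # Scope is inferred from which field is present, as in get_hash_dict().
        if 'project_name' in account:
            scope = 'project'
        elif 'domain_name' in account:
            scope = 'domain'
        elif 'system' in account:
            scope = 'system'
        else:
            scope = None
        account_hash = md5(str(account).encode('utf-8'),
                           usedforsecurity=False).hexdigest()
        hash_dict['creds'][account_hash] = account
        for role in roles:
            hash_dict['roles'].setdefault(role, []).append(account_hash)
            if scope:
                # e.g. 'project_member', 'domain_admin'
                key = '%s_%s' % (scope, role)
                hash_dict['scoped_roles'].setdefault(key, []).append(account_hash)

    # get_project_member_creds() then resolves
    # hash_dict['scoped_roles']['project_member'], while the unscoped
    # get_creds_by_roles() path keeps using hash_dict['roles'].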
 
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 431a0a0..3f735f5 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -14,16 +14,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import collections
+from collections import abc
 import email.utils
 import re
 import time
+import urllib
 
 import jsonschema
 from oslo_log import log as logging
+from oslo_log import versionutils
 from oslo_serialization import jsonutils as json
-import six
-from six.moves import urllib
 
 from tempest.lib.common import http
 from tempest.lib.common import jsonschema_validator
@@ -103,16 +103,18 @@
                                        'location', 'proxy-authenticate',
                                        'retry-after', 'server',
                                        'vary', 'www-authenticate'))
-        dscv = disable_ssl_certificate_validation
+        self.dscv = disable_ssl_certificate_validation
 
         if proxy_url:
             self.http_obj = http.ClosingProxyHttp(
                 proxy_url,
-                disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+                disable_ssl_certificate_validation=self.dscv,
+                ca_certs=ca_certs,
                 timeout=http_timeout, follow_redirects=follow_redirects)
         else:
             self.http_obj = http.ClosingHttp(
-                disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+                disable_ssl_certificate_validation=self.dscv,
+                ca_certs=ca_certs,
                 timeout=http_timeout, follow_redirects=follow_redirects)
 
     def get_headers(self, accept_type=None, send_type=None):
@@ -177,13 +179,27 @@
         return self.auth_provider.credentials.tenant_name
 
     @property
+    def project_id(self):
+        """The project id being used for requests
+
+        :rtype: string
+        :return: The project id being used for requests
+        """
+        return self.auth_provider.credentials.tenant_id
+
+    @property
     def tenant_id(self):
         """The tenant/project id being used for requests
 
         :rtype: string
         :return: The tenant/project id being used for requests
         """
-        return self.auth_provider.credentials.tenant_id
+        # NOTE(ralonsoh): this property should be deprecated, reference
+        # blueprint adopt-oslo-versioned-objects-for-db.
+        versionutils.report_deprecated_feature(
+            self.LOG, '"tenant_id" property is deprecated for removal, use '
+                      '"project_id" instead')
+        return self.project_id
 
     @property
     def password(self):
@@ -401,7 +417,7 @@
     def _safe_body(self, body, maxlen=4096):
         # convert a structure into a string safely
         try:
-            text = six.text_type(body)
+            text = str(body)
         except UnicodeDecodeError:
             # if this isn't actually text, return marker that
             return "<BinaryData: removed>"
@@ -492,7 +508,7 @@
             if not hasattr(body, "keys") or len(body.keys()) != 1:
                 return body
             # Just return the "wrapped" element
-            first_key, first_item = six.next(six.iteritems(body))
+            _, first_item = tuple(body.items())[0]
             if isinstance(first_item, (dict, list)):
                 return first_item
         except (ValueError, IndexError):
@@ -868,12 +884,12 @@
                                                     resp=resp)
 
     def is_absolute_limit(self, resp, resp_body):
-        if (not isinstance(resp_body, collections.Mapping) or
+        if (not isinstance(resp_body, abc.Mapping) or
                 'retry-after' not in resp):
             return True
         return 'exceed' in resp_body.get('message', 'blabla')
 
-    def wait_for_resource_deletion(self, id):
+    def wait_for_resource_deletion(self, id, *args, **kwargs):
         """Waits for a resource to be deleted
 
         This method will loop over is_resource_deleted until either
@@ -886,11 +902,42 @@
         """
         start_time = int(time.time())
         while True:
-            if self.is_resource_deleted(id):
+            if self.is_resource_deleted(id, *args, **kwargs):
                 return
             if int(time.time()) - start_time >= self.build_timeout:
                 message = ('Failed to delete %(resource_type)s %(id)s within '
-                           'the required time (%(timeout)s s).' %
+                           'the required time (%(timeout)s s). Timer started '
+                           'at %(start_time)s. Timer ended at %(end_time)s, '
+                           'waited for %(wait_time)s' %
+                           {'resource_type': self.resource_type, 'id': id,
+                            'timeout': self.build_timeout,
+                            'start_time': start_time,
+                            'end_time': int(time.time()),
+                            'wait_time': int(time.time()) - start_time})
+                caller = test_utils.find_test_caller()
+                if caller:
+                    message = '(%s) %s' % (caller, message)
+                raise exceptions.TimeoutException(message)
+            time.sleep(self.build_interval)
+
+    def wait_for_resource_activation(self, id):
+        """Waits for a resource to become active
+
+        This method will loop over is_resource_active until either
+        is_resource_active returns True or the build timeout is reached. This
+        depends on is_resource_active being implemented.
+
+        :param str id: The id of the resource to check
+        :raises TimeoutException: If the build_timeout has elapsed and the
+                                  resource still hasn't become active
+        """
+        start_time = int(time.time())
+        while True:
+            if self.is_resource_active(id):
+                return
+            if int(time.time()) - start_time >= self.build_timeout:
+                message = ('Failed to reach active state %(resource_type)s '
+                           '%(id)s within the required time (%(timeout)s s).' %
                            {'resource_type': self.resource_type, 'id': id,
                             'timeout': self.build_timeout})
                 caller = test_utils.find_test_caller()
@@ -905,6 +952,12 @@
                    % self.__class__.__name__)
         raise NotImplementedError(message)
 
+    def is_resource_active(self, id):
+        """Subclasses override with specific active detection."""
+        message = ('"%s" does not implement is_resource_active'
+                   % self.__class__.__name__)
+        raise NotImplementedError(message)
+
     @property
     def resource_type(self):
         """Returns the primary type of resource this client works with."""
diff --git a/tempest/lib/common/ssh.py b/tempest/lib/common/ssh.py
index 3a05f27..ee15375 100644
--- a/tempest/lib/common/ssh.py
+++ b/tempest/lib/common/ssh.py
@@ -14,13 +14,13 @@
 #    under the License.
 
 
+import io
 import select
 import socket
 import time
 import warnings
 
 from oslo_log import log as logging
-import six
 
 from tempest.lib import exceptions
 
@@ -65,9 +65,9 @@
         self.username = username
         self.port = port
         self.password = password
-        if isinstance(pkey, six.string_types):
+        if isinstance(pkey, str):
             pkey = paramiko.RSAKey.from_private_key(
-                six.StringIO(str(pkey)))
+                io.StringIO(str(pkey)))
         self.pkey = pkey
         self.look_for_keys = look_for_keys
         self.key_filename = key_filename
diff --git a/tempest/lib/common/thread.py b/tempest/lib/common/thread.py
index 510fc36..ef0ec73 100644
--- a/tempest/lib/common/thread.py
+++ b/tempest/lib/common/thread.py
@@ -13,17 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-# This make disable relative module import
-from __future__ import absolute_import
-
-
-import six
-
-if six.PY2:
-    # module thread is removed in Python 3
-    from thread import get_ident  # noqa: H237,F401
-
-else:
-    # On Python3 thread module has been deprecated and get_ident has been moved
-    # to threading module
-    from threading import get_ident  # noqa: F401
+# On Python 3 the thread module was renamed to _thread, and get_ident is
+# provided by the threading module.
+from threading import get_ident  # noqa: F401
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index 7f94612..1e94f86 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -19,7 +19,6 @@
 import uuid
 
 from oslo_utils import uuidutils
-import six.moves
 
 
 def rand_uuid():
@@ -129,7 +128,7 @@
     :rtype: string
     """
     guid = []
-    for i in range(8):
+    for _ in range(8):
         guid.append("%02x" % random.randint(0x00, 0xff))
     return ':'.join(guid)
 
@@ -169,7 +168,9 @@
     :return: size randomly bytes
     :rtype: string
     """
-    return b''.join([six.int2byte(random.randint(0, 255))
+    if size > 1 << 20:
+        raise RuntimeError('Size should not exceed 1MiB')
+    return b''.join([bytes((random.randint(0, 255),))
                      for i in range(size)])
 
 
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index 8ac1d38..d84dd28 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -11,12 +11,10 @@
 #    under the License.
 
 import functools
-import re
 import sys
 
 import netaddr
 from oslo_log import log as logging
-import six
 
 from tempest.lib.common import ssh
 from tempest.lib.common.utils import test_utils
@@ -56,8 +54,8 @@
                             except Exception:
                                 msg = 'Could not get console_log for server %s'
                                 LOG.debug(msg, self.server['id'])
-                    # re-raise the original ssh timeout exception
-                    six.reraise(*original_exception)
+                    # raise the original ssh timeout exception
+                    raise
                 finally:
                     # Delete the traceback to avoid circular references
                     _, _, trace = original_exception
@@ -134,9 +132,8 @@
         This method will not unmount the config drive, so unmount_config_drive
         must be used for cleanup.
         """
-        cmd_blkid = 'blkid | grep -i config-2'
-        result = self.exec_command(cmd_blkid)
-        dev_name = re.match('([^:]+)', result).group()
+        cmd_blkid = 'blkid -L config-2 -o device'
+        dev_name = self.exec_command(cmd_blkid).strip()
 
         try:
             self.exec_command('sudo mount %s /mnt' % dev_name)
diff --git a/tempest/lib/common/utils/test_utils.py b/tempest/lib/common/utils/test_utils.py
index 2a9f3a9..4cf8351 100644
--- a/tempest/lib/common/utils/test_utils.py
+++ b/tempest/lib/common/utils/test_utils.py
@@ -80,10 +80,19 @@
 
 def call_and_ignore_notfound_exc(func, *args, **kwargs):
     """Call the given function and pass if a `NotFound` exception is raised."""
-    try:
-        return func(*args, **kwargs)
-    except exceptions.NotFound:
-        pass
+    attempt = 0
+    while True:
+        attempt += 1
+        try:
+            return func(*args, **kwargs)
+        except exceptions.NotFound:
+            return
+        except exceptions.ServerFault:
+            # NOTE(danms): Tolerate three ServerFault exceptions while trying
+            # to do this thing, and after that, assume it's legit.
+            if attempt >= 3:
+                raise
+            LOG.warning('Got ServerFault while running %s, retrying...', func)
 
 
 def call_until_true(func, duration, sleep_for, *args, **kwargs):
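
For illustration only: the retry behaviour added above, exercised with a stand-in callable so the snippet is self-contained (delete_widget is a hypothetical helper, not a tempest API).

    from tempest.lib import exceptions
    from tempest.lib.common.utils import test_utils


    def delete_widget(widget_id):
        # Stand-in for a real client call; NotFound is swallowed as before.
        raise exceptions.NotFound()


    # Raising NotFound simply returns None. A ServerFault would be retried
    # twice and re-raised on the third attempt.
    test_utils.call_and_ignore_notfound_exc(delete_widget, 'widget-uuid')
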
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index 808e0fb..a4633ca 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -16,7 +16,6 @@
 import uuid
 
 from oslo_log import log as logging
-import six
 import testtools
 
 from tempest.lib import exceptions as lib_exc
@@ -72,19 +71,13 @@
     def decorator(f):
         @functools.wraps(f)
         def wrapper(*func_args, **func_kwargs):
-            skip = False
-            msg = ''
-            if "condition" in kwargs:
-                if kwargs["condition"] is True:
-                    skip = True
-            else:
-                skip = True
-            if "bug" in kwargs and skip is True:
-                bug = kwargs['bug']
+            condition = kwargs.get('condition', True)
+            bug = kwargs.get('bug', None)
+            if bug and condition:
                 bug_type = kwargs.get('bug_type', 'launchpad')
                 bug_url = _get_bug_url(bug, bug_type)
-                msg = "Skipped until bug: %s is resolved." % bug_url
-                raise testtools.TestCase.skipException(msg)
+                raise testtools.TestCase.skipException(
+                    "Skipped until bug: %s is resolved." % bug_url)
             return f(*func_args, **func_kwargs)
         return wrapper
     return decorator
@@ -116,7 +109,7 @@
 
 def idempotent_id(id):
     """Stub for metadata decorator"""
-    if not isinstance(id, six.string_types):
+    if not isinstance(id, str):
         raise TypeError('Test idempotent_id must be string not %s'
                         '' % type(id).__name__)
     uuid.UUID(id)
@@ -124,7 +117,7 @@
     def decorator(f):
         f = testtools.testcase.attr('id-%s' % id)(f)
         if f.__doc__:
-            f.__doc__ = 'Test idempotent id: %s\n%s' % (id, f.__doc__)
+            f.__doc__ = 'Test idempotent id: %s\n\n%s' % (id, f.__doc__)
         else:
             f.__doc__ = 'Test idempotent id: %s' % id
         return f
@@ -146,7 +139,7 @@
         # Check to see if the attr should be conditional applied.
         if 'condition' in kwargs and not kwargs.get('condition'):
             return f
-        if 'type' in kwargs and isinstance(kwargs['type'], six.string_types):
+        if 'type' in kwargs and isinstance(kwargs['type'], str):
             f = testtools.testcase.attr(kwargs['type'])(f)
         elif 'type' in kwargs and isinstance(kwargs['type'], list):
             for attr in kwargs['type']:
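
A hedged usage sketch of the simplified skip_because() logic above; the bug number and test class are placeholders.

    import testtools

    from tempest.lib import decorators


    class ExampleTest(testtools.TestCase):

        @decorators.skip_because(bug='1234567')
        def test_always_skipped(self):
            # condition defaults to True, so the wrapper raises skipException.
            pass

        @decorators.skip_because(bug='1234567', condition=False)
        def test_runs_normally(self):
            # A falsy condition disables the skip and the test body runs.
            pass
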
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index 84b7ee6..abe68d2 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -294,3 +294,7 @@
 class ConsistencyGroupSnapshotException(TempestException):
     message = ("Consistency group snapshot %(cgsnapshot_id)s failed and is "
                "in ERROR status")
+
+
+class InvalidScopeType(TempestException):
+    message = "Invalid scope %(scope)s"
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index 90debd9..8b5c758 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -52,7 +52,6 @@
         'image.v2': image.v2,
         'network': network,
         'object-storage': object_storage,
-        'volume.v1': volume.v1,
         'volume.v2': volume.v2,
         'volume.v3': volume.v3
     }
@@ -257,7 +256,7 @@
     # class should only be used by tests hosted in Tempest.
 
     @removals.removed_kwarg('client_parameters')
-    def __init__(self, credentials, identity_uri, region=None, scope='project',
+    def __init__(self, credentials, identity_uri, region=None, scope=None,
                  disable_ssl_certificate_validation=True, ca_certs=None,
                  trace_requests='', client_parameters=None, proxy_url=None):
         """Service Clients provider
@@ -348,6 +347,14 @@
         self.ca_certs = ca_certs
         self.trace_requests = trace_requests
         self.proxy_url = proxy_url
+        if self.credentials.project_id or self.credentials.project_name:
+            scope = 'project'
+        elif self.credentials.system:
+            scope = 'system'
+        elif self.credentials.domain_id or self.credentials.domain_name:
+            scope = 'domain'
+        else:
+            scope = 'project'
         # Creates an auth provider for the credentials
         self.auth_provider = auth_provider_class(
             self.credentials, self.identity_uri, scope=scope,
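
For illustration (a simplified standalone sketch, not tempest code): the scope precedence applied above, with credentials mocked as a plain namespace instead of a real auth.Credentials object.

    from types import SimpleNamespace


    def pick_scope(creds):
        # Mirrors ServiceClients.__init__: project wins over system, system
        # over domain, and project is the fallback when nothing is set.
        if creds.project_id or creds.project_name:
            return 'project'
        if creds.system:
            return 'system'
        if creds.domain_id or creds.domain_name:
            return 'domain'
        return 'project'


    creds = SimpleNamespace(project_id=None, project_name=None, system='all',
                            domain_id=None, domain_name=None)
    assert pick_scope(creds) == 'system'
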
diff --git a/tempest/lib/services/compute/__init__.py b/tempest/lib/services/compute/__init__.py
index 91e896a..8d07a45 100644
--- a/tempest/lib/services/compute/__init__.py
+++ b/tempest/lib/services/compute/__init__.py
@@ -14,6 +14,8 @@
 
 from tempest.lib.services.compute.agents_client import AgentsClient
 from tempest.lib.services.compute.aggregates_client import AggregatesClient
+from tempest.lib.services.compute.assisted_volume_snapshots_client import \
+    AssistedVolumeSnapshotsClient
 from tempest.lib.services.compute.availability_zone_client import \
     AvailabilityZoneClient
 from tempest.lib.services.compute.baremetal_nodes_client import \
@@ -63,9 +65,10 @@
 from tempest.lib.services.compute.volumes_client import \
     VolumesClient
 
-__all__ = ['AgentsClient', 'AggregatesClient', 'AvailabilityZoneClient',
-           'BaremetalNodesClient', 'CertificatesClient', 'ExtensionsClient',
-           'FixedIPsClient', 'FlavorsClient', 'FloatingIPPoolsClient',
+__all__ = ['AgentsClient', 'AggregatesClient', 'AssistedVolumeSnapshotsClient',
+           'AvailabilityZoneClient', 'BaremetalNodesClient',
+           'CertificatesClient', 'ExtensionsClient', 'FixedIPsClient',
+           'FlavorsClient', 'FloatingIPPoolsClient',
            'FloatingIPsBulkClient', 'FloatingIPsClient', 'HostsClient',
            'HypervisorClient', 'ImagesClient', 'InstanceUsagesAuditLogClient',
            'InterfacesClient', 'KeyPairsClient', 'LimitsClient',
diff --git a/tempest/lib/services/compute/agents_client.py b/tempest/lib/services/compute/agents_client.py
index 12b3900..bd973dd 100644
--- a/tempest/lib/services/compute/agents_client.py
+++ b/tempest/lib/services/compute/agents_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import agents as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/assisted_volume_snapshots_client.py b/tempest/lib/services/compute/assisted_volume_snapshots_client.py
new file mode 100644
index 0000000..7a949df
--- /dev/null
+++ b/tempest/lib/services/compute/assisted_volume_snapshots_client.py
@@ -0,0 +1,64 @@
+# Copyright 2017 AT&T Corp
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.compute import base_compute_client
+
+
+class AssistedVolumeSnapshotsClient(base_compute_client.BaseComputeClient):
+    """Service client for assisted volume snapshots"""
+
+    def delete_assisted_volume_snapshot(self, volume_id, snapshot_id):
+        """Delete snapshot for the given volume id.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/compute/#delete-assisted-volume-snapshot
+
+        :param volume_id: UUID of the volume
+        :param snapshot_id: The UUID of the snapshot
+        """
+        query_param = {'delete_info': json.dumps({'volume_id': volume_id})}
+        resp, body = self.delete("os-assisted-volume-snapshots/%s?%s"
+                                 % (snapshot_id,
+                                    urllib.urlencode(query_param)))
+        return rest_client.ResponseBody(resp, body)
+
+    def create_assisted_volume_snapshot(self, volume_id, snapshot_id,
+                                        **kwargs):
+        """Create a new assisted volume snapshot.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/compute/#create-assisted-volume-snapshots
+
+        :param volume_id: the source volume ID
+        :param snapshot_id: the UUID for a snapshot
+        :param type: Type of snapshot, such as qcow2
+        :param new_file: The name of the image file that will be created
+        """
+        url = "os-assisted-volume-snapshots"
+        info = {"snapshot_id": snapshot_id}
+        if kwargs:
+            info.update(kwargs)
+        body = {"snapshot": {"volume_id": volume_id, "create_info": info}}
+        post_body = json.dumps(body)
+        resp, body = self.post(url, post_body)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
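
A hedged usage sketch of the new client; the wrapper function and its arguments are assumptions, and the client instance is expected to be built elsewhere (e.g. by the service clients machinery).

    def snapshot_and_cleanup(client, volume_id, snapshot_id):
        """client is assumed to be an AssistedVolumeSnapshotsClient instance."""
        # 'type' and 'new_file' end up inside the create_info payload.
        client.create_assisted_volume_snapshot(
            volume_id, snapshot_id, type='qcow2', new_file='snap.qcow2')
        # Deletion sends the volume id back as the delete_info query parameter.
        client.delete_assisted_volume_snapshot(volume_id, snapshot_id)
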
diff --git a/tempest/lib/services/compute/baremetal_nodes_client.py b/tempest/lib/services/compute/baremetal_nodes_client.py
index 3efdbce..83af451 100644
--- a/tempest/lib/services/compute/baremetal_nodes_client.py
+++ b/tempest/lib/services/compute/baremetal_nodes_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import baremetal_nodes \
     as schema
diff --git a/tempest/lib/services/compute/flavors_client.py b/tempest/lib/services/compute/flavors_client.py
index e22b5b2..5282405 100644
--- a/tempest/lib/services/compute/flavors_client.py
+++ b/tempest/lib/services/compute/flavors_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import flavors as schema
 from tempest.lib.api_schema.response.compute.v2_1 import flavors_access \
diff --git a/tempest/lib/services/compute/floating_ip_pools_client.py b/tempest/lib/services/compute/floating_ip_pools_client.py
index d3af050..aa065b8 100644
--- a/tempest/lib/services/compute/floating_ip_pools_client.py
+++ b/tempest/lib/services/compute/floating_ip_pools_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import floating_ips as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/floating_ips_client.py b/tempest/lib/services/compute/floating_ips_client.py
index d7a1a9b..e6b6916 100644
--- a/tempest/lib/services/compute/floating_ips_client.py
+++ b/tempest/lib/services/compute/floating_ips_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import floating_ips as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/hosts_client.py b/tempest/lib/services/compute/hosts_client.py
index 743b4ec..bbecc3b 100644
--- a/tempest/lib/services/compute/hosts_client.py
+++ b/tempest/lib/services/compute/hosts_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import hosts as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/images_client.py b/tempest/lib/services/compute/images_client.py
index b252ee9..b6d8d30 100644
--- a/tempest/lib/services/compute/images_client.py
+++ b/tempest/lib/services/compute/images_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import images as schema
 from tempest.lib.api_schema.response.compute.v2_45 import images as schemav245
diff --git a/tempest/lib/services/compute/interfaces_client.py b/tempest/lib/services/compute/interfaces_client.py
index e1c02fa..9244a4a 100644
--- a/tempest/lib/services/compute/interfaces_client.py
+++ b/tempest/lib/services/compute/interfaces_client.py
@@ -16,15 +16,22 @@
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.compute.v2_1 import interfaces as schema
+from tempest.lib.api_schema.response.compute.v2_70 import interfaces as \
+    schemav270
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
 
 class InterfacesClient(base_compute_client.BaseComputeClient):
 
+    schema_versions_info = [
+        {'min': None, 'max': '2.69', 'schema': schema},
+        {'min': '2.70', 'max': None, 'schema': schemav270}]
+
     def list_interfaces(self, server_id):
         resp, body = self.get('servers/%s/os-interface' % server_id)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.list_interfaces, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -40,6 +47,7 @@
         resp, body = self.post('servers/%s/os-interface' % server_id,
                                body=post_body)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.get_create_interfaces, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -47,6 +55,7 @@
         resp, body = self.get('servers/%s/os-interface/%s' % (server_id,
                                                               port_id))
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.get_create_interfaces, resp, body)
         return rest_client.ResponseBody(resp, body)
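
A simplified standalone sketch (not tempest's api_version_utils) of how schema_versions_info selects a response schema by microversion, as now done in InterfacesClient:

    def pick_schema(schema_versions_info, microversion):
        """Return the schema whose [min, max] range contains microversion."""
        def as_tuple(ver):
            return tuple(int(p) for p in ver.split('.'))

        req = as_tuple(microversion)
        for entry in schema_versions_info:
            low = as_tuple(entry['min']) if entry['min'] else (0, 0)
            high = as_tuple(entry['max']) if entry['max'] else (999, 999)
            if low <= req <= high:
                return entry['schema']
        raise ValueError('No schema registered for %s' % microversion)


    versions = [
        {'min': None, 'max': '2.69', 'schema': 'v2_1 interfaces schema'},
        {'min': '2.70', 'max': None, 'schema': 'v2_70 interfaces schema'},
    ]
    assert pick_schema(versions, '2.69') == 'v2_1 interfaces schema'
    assert pick_schema(versions, '2.70') == 'v2_70 interfaces schema'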
 
diff --git a/tempest/lib/services/compute/keypairs_client.py b/tempest/lib/services/compute/keypairs_client.py
index 47cf2d0..9d7b7fc 100644
--- a/tempest/lib/services/compute/keypairs_client.py
+++ b/tempest/lib/services/compute/keypairs_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import keypairs as schemav21
 from tempest.lib.api_schema.response.compute.v2_2 import keypairs as schemav22
diff --git a/tempest/lib/services/compute/migrations_client.py b/tempest/lib/services/compute/migrations_client.py
index 23de064..8a6e62a 100644
--- a/tempest/lib/services/compute/migrations_client.py
+++ b/tempest/lib/services/compute/migrations_client.py
@@ -12,12 +12,15 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import migrations as schema
 from tempest.lib.api_schema.response.compute.v2_23 import migrations \
     as schemav223
+from tempest.lib.api_schema.response.compute.v2_59 import migrations \
+    as schemav259
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
@@ -25,7 +28,8 @@
 class MigrationsClient(base_compute_client.BaseComputeClient):
     schema_versions_info = [
         {'min': None, 'max': '2.22', 'schema': schema},
-        {'min': '2.23', 'max': None, 'schema': schemav223}]
+        {'min': '2.23', 'max': '2.58', 'schema': schemav223},
+        {'min': '2.59', 'max': None, 'schema': schemav259}]
 
     def list_migrations(self, **params):
         """List all migrations.
diff --git a/tempest/lib/services/compute/quotas_client.py b/tempest/lib/services/compute/quotas_client.py
index 12e865e..dd796aa 100644
--- a/tempest/lib/services/compute/quotas_client.py
+++ b/tempest/lib/services/compute/quotas_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import quotas as schema
 from tempest.lib.api_schema.response.compute.v2_36 import quotas as schemav236
diff --git a/tempest/lib/services/compute/security_groups_client.py b/tempest/lib/services/compute/security_groups_client.py
index 9493144..0bba990 100644
--- a/tempest/lib/services/compute/security_groups_client.py
+++ b/tempest/lib/services/compute/security_groups_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import \
     security_groups as schema
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index a687137..ed3d4c0 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -16,9 +16,9 @@
 #    under the License.
 
 import copy
+from urllib import parse as urllib
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import \
     security_groups as security_groups_schema
@@ -29,12 +29,17 @@
 from tempest.lib.api_schema.response.compute.v2_3 import servers as schemav23
 from tempest.lib.api_schema.response.compute.v2_47 import servers as schemav247
 from tempest.lib.api_schema.response.compute.v2_48 import servers as schemav248
+from tempest.lib.api_schema.response.compute.v2_51 import servers as schemav251
 from tempest.lib.api_schema.response.compute.v2_54 import servers as schemav254
 from tempest.lib.api_schema.response.compute.v2_57 import servers as schemav257
+from tempest.lib.api_schema.response.compute.v2_58 import servers as schemav258
 from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
+from tempest.lib.api_schema.response.compute.v2_62 import servers as schemav262
 from tempest.lib.api_schema.response.compute.v2_63 import servers as schemav263
 from tempest.lib.api_schema.response.compute.v2_70 import servers as schemav270
 from tempest.lib.api_schema.response.compute.v2_71 import servers as schemav271
+from tempest.lib.api_schema.response.compute.v2_73 import servers as schemav273
+from tempest.lib.api_schema.response.compute.v2_79 import servers as schemav279
 from tempest.lib.api_schema.response.compute.v2_8 import servers as schemav28
 from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
 from tempest.lib.common import rest_client
@@ -54,12 +59,17 @@
         {'min': '2.19', 'max': '2.25', 'schema': schemav219},
         {'min': '2.26', 'max': '2.46', 'schema': schemav226},
         {'min': '2.47', 'max': '2.47', 'schema': schemav247},
-        {'min': '2.48', 'max': '2.53', 'schema': schemav248},
+        {'min': '2.48', 'max': '2.50', 'schema': schemav248},
+        {'min': '2.51', 'max': '2.53', 'schema': schemav251},
         {'min': '2.54', 'max': '2.56', 'schema': schemav254},
-        {'min': '2.57', 'max': '2.62', 'schema': schemav257},
+        {'min': '2.57', 'max': '2.57', 'schema': schemav257},
+        {'min': '2.58', 'max': '2.61', 'schema': schemav258},
+        {'min': '2.62', 'max': '2.62', 'schema': schemav262},
         {'min': '2.63', 'max': '2.69', 'schema': schemav263},
         {'min': '2.70', 'max': '2.70', 'schema': schemav270},
-        {'min': '2.71', 'max': None, 'schema': schemav271}]
+        {'min': '2.71', 'max': '2.72', 'schema': schemav271},
+        {'min': '2.73', 'max': '2.78', 'schema': schemav273},
+        {'min': '2.79', 'max': None, 'schema': schemav279}]
 
     def __init__(self, auth_provider, service, region,
                  enable_instance_password=True, **kwargs):
@@ -204,11 +214,17 @@
     def action(self, server_id, action_name,
                schema=schema.server_actions_common_schema,
                **kwargs):
-        post_body = json.dumps({action_name: kwargs})
+        if 'body' in kwargs:
+            post_body = json.dumps(kwargs['body'])
+        else:
+            post_body = json.dumps({action_name: kwargs})
         resp, body = self.post('servers/%s/action' % server_id,
                                post_body)
         if body:
             body = json.loads(body)
+        else:
+            if isinstance(body, bytes):
+                body = body.decode('utf-8')
         self.validate_response(schema, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -603,6 +619,15 @@
         API reference:
         https://docs.openstack.org/api-ref/compute/#unshelve-restore-shelved-server-unshelve-action
         """
+        # NOTE(gmann): pass None as request body if nothing is requested.
+        # Nova started checking the request body in the 2.77 microversion and
+        # only accepts an AZ or None as a valid request body, rejecting the
+        # empty dict {}. Before 2.77 any body was accepted because Nova did
+        # not check it, and per the api-ref None is a valid request body, so
+        # we do not need to check the requested microversion here and always
+        # default the request body to None.
+        if not kwargs:
+            kwargs['body'] = {'unshelve': None}
         return self.action(server_id, 'unshelve', **kwargs)
 
     def shelve_offload_server(self, server_id, **kwargs):
@@ -629,7 +654,7 @@
 
         For a full list of available parameters, please refer to the official
         API reference:
-        https://docs.openstack.org/api-ref/compute/#create-remote-console
+        https://docs.openstack.org/api-ref/compute/#create-console
         """
         param = {
             'remote_console': {
@@ -696,6 +721,7 @@
         resp, body = self.get("servers/%s/os-instance-actions/%s" %
                               (server_id, request_id))
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.show_instance_action, resp, body)
         return rest_client.ResponseBody(resp, body)
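
A hedged sketch of what the new 'body' handling in action() changes for callers; the wrapper function and its arguments are assumptions.

    def unshelve_both_ways(servers_client, server_id):
        """servers_client is assumed to be a ServersClient instance."""
        # Plain kwargs are still wrapped under the action name, producing
        # {"unshelve": {"availability_zone": "az1"}} (valid from 2.77 on).
        servers_client.unshelve_server(server_id, availability_zone='az1')

        # With no kwargs, unshelve_server() now injects body={'unshelve': None}
        # and action() posts that dict verbatim instead of an empty {}.
        servers_client.unshelve_server(server_id)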
 
diff --git a/tempest/lib/services/compute/services_client.py b/tempest/lib/services/compute/services_client.py
index 4e3383f..7d9f3e2 100644
--- a/tempest/lib/services/compute/services_client.py
+++ b/tempest/lib/services/compute/services_client.py
@@ -14,8 +14,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import services as schema
 from tempest.lib.api_schema.response.compute.v2_11 import services \
diff --git a/tempest/lib/services/compute/snapshots_client.py b/tempest/lib/services/compute/snapshots_client.py
index 225eb8d..2e6f7cf 100644
--- a/tempest/lib/services/compute/snapshots_client.py
+++ b/tempest/lib/services/compute/snapshots_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import snapshots as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/tenant_usages_client.py b/tempest/lib/services/compute/tenant_usages_client.py
index a34730c..b47d917 100644
--- a/tempest/lib/services/compute/tenant_usages_client.py
+++ b/tempest/lib/services/compute/tenant_usages_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import tenant_usages
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/volumes_client.py b/tempest/lib/services/compute/volumes_client.py
index 11282ee..52172ed 100644
--- a/tempest/lib/services/compute/volumes_client.py
+++ b/tempest/lib/services/compute/volumes_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import volumes as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/identity/v2/identity_client.py b/tempest/lib/services/identity/v2/identity_client.py
index d7526f3..6239ba6 100644
--- a/tempest/lib/services/identity/v2/identity_client.py
+++ b/tempest/lib/services/identity/v2/identity_client.py
@@ -10,8 +10,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v2/roles_client.py b/tempest/lib/services/identity/v2/roles_client.py
index a133fc3..1580c33 100644
--- a/tempest/lib/services/identity/v2/roles_client.py
+++ b/tempest/lib/services/identity/v2/roles_client.py
@@ -10,8 +10,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v2/services_client.py b/tempest/lib/services/identity/v2/services_client.py
index fc51cb4..2a0e5ca 100644
--- a/tempest/lib/services/identity/v2/services_client.py
+++ b/tempest/lib/services/identity/v2/services_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v2/tenants_client.py b/tempest/lib/services/identity/v2/tenants_client.py
index 09618ad..3435835 100644
--- a/tempest/lib/services/identity/v2/tenants_client.py
+++ b/tempest/lib/services/identity/v2/tenants_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v2/token_client.py b/tempest/lib/services/identity/v2/token_client.py
index 9f10f58..1191154 100644
--- a/tempest/lib/services/identity/v2/token_client.py
+++ b/tempest/lib/services/identity/v2/token_client.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
@@ -125,15 +124,3 @@
             return body['token']['id'], body
         else:
             return body['token']['id']
-
-
-class TokenClientJSON(TokenClient):
-    LOG = logging.getLogger(__name__)
-
-    def _warn(self):
-        self.LOG.warning("%s class was deprecated and renamed to %s",
-                         self.__class__.__name__, 'TokenClient')
-
-    def __init__(self, *args, **kwargs):
-        self._warn()
-        super(TokenClientJSON, self).__init__(*args, **kwargs)
diff --git a/tempest/lib/services/identity/v2/users_client.py b/tempest/lib/services/identity/v2/users_client.py
index 72f29be..c3217c9 100644
--- a/tempest/lib/services/identity/v2/users_client.py
+++ b/tempest/lib/services/identity/v2/users_client.py
@@ -10,8 +10,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/__init__.py b/tempest/lib/services/identity/v3/__init__.py
index da1c51c..af09fb1 100644
--- a/tempest/lib/services/identity/v3/__init__.py
+++ b/tempest/lib/services/identity/v3/__init__.py
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+from tempest.lib.services.identity.v3.access_rules_client import \
+    AccessRulesClient
 from tempest.lib.services.identity.v3.application_credentials_client import \
     ApplicationCredentialsClient
 from tempest.lib.services.identity.v3.catalog_client import \
@@ -30,6 +32,7 @@
 from tempest.lib.services.identity.v3.identity_client import IdentityClient
 from tempest.lib.services.identity.v3.inherited_roles_client import \
     InheritedRolesClient
+from tempest.lib.services.identity.v3.limits_client import LimitsClient
 from tempest.lib.services.identity.v3.oauth_consumers_client import \
     OAUTHConsumerClient
 from tempest.lib.services.identity.v3.oauth_token_client import \
@@ -48,11 +51,13 @@
 from tempest.lib.services.identity.v3.users_client import UsersClient
 from tempest.lib.services.identity.v3.versions_client import VersionsClient
 
-__all__ = ['ApplicationCredentialsClient', 'CatalogClient',
-           'CredentialsClient', 'DomainsClient', 'DomainConfigurationClient',
-           'EndPointGroupsClient', 'EndPointsClient', 'EndPointsFilterClient',
+__all__ = ['AccessRulesClient', 'ApplicationCredentialsClient',
+           'CatalogClient', 'CredentialsClient', 'DomainsClient',
+           'DomainConfigurationClient', 'EndPointGroupsClient',
+           'EndPointsClient', 'EndPointsFilterClient',
            'GroupsClient', 'IdentityClient', 'InheritedRolesClient',
-           'OAUTHConsumerClient', 'OAUTHTokenClient', 'PoliciesClient',
-           'ProjectsClient', 'ProjectTagsClient', 'RegionsClient',
-           'RoleAssignmentsClient', 'RolesClient', 'ServicesClient',
-           'V3TokenClient', 'TrustsClient', 'UsersClient', 'VersionsClient']
+           'LimitsClient', 'OAUTHConsumerClient', 'OAUTHTokenClient',
+           'PoliciesClient', 'ProjectsClient', 'ProjectTagsClient',
+           'RegionsClient', 'RoleAssignmentsClient', 'RolesClient',
+           'ServicesClient', 'V3TokenClient', 'TrustsClient', 'UsersClient',
+           'VersionsClient']
diff --git a/tempest/lib/services/identity/v3/access_rules_client.py b/tempest/lib/services/identity/v3/access_rules_client.py
new file mode 100644
index 0000000..c3be5df
--- /dev/null
+++ b/tempest/lib/services/identity/v3/access_rules_client.py
@@ -0,0 +1,69 @@
+# Copyright 2019 SUSE LLC
+#
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+https://docs.openstack.org/api-ref/identity/v3/index.html#application-credentials
+"""
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class AccessRulesClient(rest_client.RestClient):
+    api_version = "v3"
+
+    def show_access_rule(self, user_id, access_rule_id):
+        """Gets details of an access rule.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3/index.html#show-access-rule-details
+        """
+        resp, body = self.get('users/%s/access_rules/%s' %
+                              (user_id, access_rule_id))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_access_rules(self, user_id, **params):
+        """Lists out all of a user's access rules.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3/index.html#list-access-rules
+        """
+        url = 'users/%s/access_rules' % user_id
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_access_rule(self, user_id, access_rule_id):
+        """Deletes an access rule.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3/index.html#delete-access-rule
+        """
+        resp, body = self.delete('users/%s/access_rules/%s' %
+                                 (user_id, access_rule_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
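
A hedged usage sketch of the new AccessRulesClient; the wrapper function is an assumption and the response keys follow the keystone access rules API reference.

    def prune_access_rules(access_rules_client, user_id):
        """access_rules_client is assumed to be an AccessRulesClient instance."""
        rules = access_rules_client.list_access_rules(user_id)['access_rules']
        for rule in rules:
            access_rules_client.show_access_rule(user_id, rule['id'])
            # Rules no longer referenced by application credentials can be removed.
            access_rules_client.delete_access_rule(user_id, rule['id'])
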
diff --git a/tempest/lib/services/identity/v3/application_credentials_client.py b/tempest/lib/services/identity/v3/application_credentials_client.py
index be2e172..e7f3ac2 100644
--- a/tempest/lib/services/identity/v3/application_credentials_client.py
+++ b/tempest/lib/services/identity/v3/application_credentials_client.py
@@ -18,8 +18,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#application-credentials
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/credentials_client.py b/tempest/lib/services/identity/v3/credentials_client.py
index 3f4b40e..27f6156 100644
--- a/tempest/lib/services/identity/v3/credentials_client.py
+++ b/tempest/lib/services/identity/v3/credentials_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#credentials
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/domains_client.py b/tempest/lib/services/identity/v3/domains_client.py
index bd32cfc..c1d1980 100644
--- a/tempest/lib/services/identity/v3/domains_client.py
+++ b/tempest/lib/services/identity/v3/domains_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/endpoint_filter_client.py b/tempest/lib/services/identity/v3/endpoint_filter_client.py
index ce84869..2d5c8c9 100644
--- a/tempest/lib/services/identity/v3/endpoint_filter_client.py
+++ b/tempest/lib/services/identity/v3/endpoint_filter_client.py
@@ -66,3 +66,57 @@
             % (project_id, endpoint_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
+
+    def list_endpoint_groups_for_project(self, project_id):
+        """List Endpoint Groups Associated with Project."""
+        resp, body = self.get(
+            self.ep_filter + '/projects/%s/endpoint_groups'
+            % project_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_projects_for_endpoint_group(self, endpoint_group_id):
+        """List Projects Associated with Endpoint Group."""
+        resp, body = self.get(
+            self.ep_filter + '/endpoint_groups/%s/projects'
+            % endpoint_group_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_endpoints_for_endpoint_group(self, endpoint_group_id):
+        """List Endpoints Associated with Endpoint Group."""
+        resp, body = self.get(
+            self.ep_filter + '/endpoint_groups/%s/endpoints'
+            % endpoint_group_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
+        """Create Endpoint Group to Project Association."""
+        body = None
+        resp, body = self.put(
+            self.ep_filter + '/endpoint_groups/%s/projects/%s'
+            % (endpoint_group_id, project_id), body)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_endpoint_group_for_project(self, endpoint_group_id, project_id):
+        """Get Endpoint Group to Project Association."""
+        resp, body = self.get(
+            self.ep_filter + '/endpoint_groups/%s/projects/%s'
+            % (endpoint_group_id, project_id))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_endpoint_group_from_project(
+        self, endpoint_group_id, project_id):
+        """Delete Endpoint Group to Project Association."""
+        resp, body = self.delete(
+            self.ep_filter + '/endpoint_groups/%s/projects/%s'
+            % (endpoint_group_id, project_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
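
The new endpoint-group calls mirror the existing OS-EP-FILTER association helpers: an association is created with a body-less PUT and then read back with GET. A rough usage sketch, written as a helper that assumes an already-configured endpoint filter client instance (construction has the same shape as the sketch above) and pre-existing endpoint group and project:

    def associate_endpoint_group(ep_client, endpoint_group_id, project_id):
        """Associate an endpoint group with a project and read it back.

        ep_client is assumed to be a configured endpoint filter client.
        """
        ep_client.add_endpoint_group_to_project(endpoint_group_id, project_id)
        # Existence check: keystone answers the association GET with 200.
        ep_client.show_endpoint_group_for_project(endpoint_group_id, project_id)
        groups = ep_client.list_endpoint_groups_for_project(project_id)
        return groups['endpoint_groups']
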
diff --git a/tempest/lib/services/identity/v3/endpoints_client.py b/tempest/lib/services/identity/v3/endpoints_client.py
index 236b34c..de85388 100644
--- a/tempest/lib/services/identity/v3/endpoints_client.py
+++ b/tempest/lib/services/identity/v3/endpoints_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/groups_client.py b/tempest/lib/services/identity/v3/groups_client.py
index f823b21..6f82067 100644
--- a/tempest/lib/services/identity/v3/groups_client.py
+++ b/tempest/lib/services/identity/v3/groups_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#groups
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
@@ -110,6 +111,6 @@
 
     def check_group_user_existence(self, group_id, user_id):
         """Check user in group."""
-        resp, body = self.head('groups/%s/users/%s' % (group_id, user_id))
+        resp, _ = self.head('groups/%s/users/%s' % (group_id, user_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/identity/v3/identity_providers_client.py b/tempest/lib/services/identity/v3/identity_providers_client.py
new file mode 100644
index 0000000..002bc8c
--- /dev/null
+++ b/tempest/lib/services/identity/v3/identity_providers_client.py
@@ -0,0 +1,93 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class IdentityProvidersClient(rest_client.RestClient):
+
+    def register_identity_provider(self, identity_provider_id, **kwargs):
+        """Register an identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#register-an-identity-provider
+        """
+        post_body = json.dumps({'identity_provider': kwargs})
+        resp, body = self.put(
+            'OS-FEDERATION/identity_providers/%s' % identity_provider_id,
+            post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_identity_providers(self, **params):
+        """List identity providers.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-identity-providers
+        """
+        url = 'OS-FEDERATION/identity_providers'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def get_identity_provider(self, identity_provider_id):
+        """Get identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-identity-provider
+        """
+        resp, body = self.get(
+            'OS-FEDERATION/identity_providers/%s' % identity_provider_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_identity_provider(self, identity_provider_id):
+        """Delete identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-identity-provider
+        """
+        resp, body = self.delete(
+            'OS-FEDERATION/identity_providers/%s' % identity_provider_id)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_identity_provider(self, identity_provider_id, **kwargs):
+        """Update identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-identity-provider
+        """
+        post_body = json.dumps({'identity_provider': kwargs})
+        resp, body = self.patch(
+            'OS-FEDERATION/identity_providers/%s' % identity_provider_id,
+            post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
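
register_identity_provider and update_identity_provider both wrap their keyword arguments in an 'identity_provider' object, so callers pass the provider attributes directly. A short usage sketch assuming a configured IdentityProvidersClient; the provider id and remote_ids value are placeholders:

    def register_example_idp(idp_client):
        """Register a hypothetical identity provider and list providers back."""
        idp_client.register_identity_provider(
            'example-idp',
            remote_ids=['https://idp.example.com/saml2/idp'],
            enabled=True)
        return idp_client.list_identity_providers()['identity_providers']
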
diff --git a/tempest/lib/services/identity/v3/inherited_roles_client.py b/tempest/lib/services/identity/v3/inherited_roles_client.py
index 3949437..f937ed6 100644
--- a/tempest/lib/services/identity/v3/inherited_roles_client.py
+++ b/tempest/lib/services/identity/v3/inherited_roles_client.py
@@ -51,7 +51,7 @@
     def check_user_inherited_project_role_on_domain(
             self, domain_id, user_id, role_id):
         """Checks whether a user has an inherited project role on a domain."""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/domains/%s/users/%s/roles/%s/inherited_to_projects"
             % (domain_id, user_id, role_id))
         self.expected_success(204, resp.status)
@@ -88,7 +88,7 @@
     def check_group_inherited_project_role_on_domain(
             self, domain_id, group_id, role_id):
         """Checks whether a group has an inherited project role on a domain."""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/domains/%s/groups/%s/roles/%s/inherited_to_projects"
             % (domain_id, group_id, role_id))
         self.expected_success(204, resp.status)
@@ -115,7 +115,7 @@
     def check_user_has_flag_on_inherited_to_project(
             self, project_id, user_id, role_id):
         """Check if user has an inherited project role on project"""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/projects/%s/users/%s/roles/%s/inherited_to_projects"
             % (project_id, user_id, role_id))
         self.expected_success(204, resp.status)
@@ -142,7 +142,7 @@
     def check_group_has_flag_on_inherited_to_project(
             self, project_id, group_id, role_id):
         """Check if group has an inherited project role on project"""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/projects/%s/groups/%s/roles/%s/inherited_to_projects"
             % (project_id, group_id, role_id))
         self.expected_success(204, resp.status)
diff --git a/tempest/lib/services/identity/v3/limits_client.py b/tempest/lib/services/identity/v3/limits_client.py
new file mode 100644
index 0000000..26d04bc
--- /dev/null
+++ b/tempest/lib/services/identity/v3/limits_client.py
@@ -0,0 +1,57 @@
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class LimitsClient(rest_client.RestClient):
+    api_version = "v3"
+
+    def get_registered_limits(self):
+        """Lists all registered limits."""
+        resp, body = self.get('registered_limits')
+        self.expected_success(200, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
+
+    def create_limit(self, region_id, service_id, project_id, resource_name,
+                     default_limit, description=None, domain_id=None):
+        """Creates a limit in keystone."""
+        limit = {
+            'service_id': service_id,
+            'project_id': project_id,
+            'resource_name': resource_name,
+            'resource_limit': default_limit,
+            'region_id': region_id,
+            'description': description or '%s limit for %s' % (
+                resource_name, project_id),
+        }
+        if domain_id:
+            limit['domain_id'] = domain_id
+        post_body = json.dumps({'limits': [limit]})
+        resp, body = self.post('limits', post_body)
+        self.expected_success(201, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
+
+    def update_limit(self, limit_id, resource_limit, description=None):
+        """Updates a limit in keystone by id."""
+
+        limit = {'resource_limit': resource_limit}
+        if description:
+            limit['description'] = description
+        patch_body = json.dumps({'limit': limit})
+        resp, body = self.patch('limits/%s' % limit_id, patch_body)
+        self.expected_success(200, resp.status)
+        return rest_client.ResponseBody(resp, json.loads(body))
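
create_limit assembles a single unified-limit record and posts it wrapped in a 'limits' list, which is why the 201 response also comes back as a list. A usage sketch assuming a configured LimitsClient; the IDs are placeholders and the resource name depends on what the deployment has registered:

    def create_demo_limit(limits_client, region_id, service_id, project_id):
        """Create one project limit and return the created record (sketch)."""
        body = limits_client.create_limit(
            region_id, service_id, project_id,
            resource_name='cores', default_limit=20)
        created = body['limits'][0]
        # Later adjustments go through PATCH /limits/{id}.
        limits_client.update_limit(created['id'], resource_limit=40,
                                   description='raised for the demo project')
        return created
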
diff --git a/tempest/lib/services/identity/v3/mappings_client.py b/tempest/lib/services/identity/v3/mappings_client.py
new file mode 100644
index 0000000..a924b33
--- /dev/null
+++ b/tempest/lib/services/identity/v3/mappings_client.py
@@ -0,0 +1,91 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class MappingsClient(rest_client.RestClient):
+
+    def create_mapping(self, mapping_id, **kwargs):
+        """Create a mapping.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#create-a-mapping
+        """
+        post_body = json.dumps({'mapping': kwargs})
+        resp, body = self.put(
+            'OS-FEDERATION/mappings/%s' % mapping_id, post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def get_mapping(self, mapping_id):
+        """Get a mapping.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-a-mapping
+        """
+        resp, body = self.get(
+            'OS-FEDERATION/mappings/%s' % mapping_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_mapping(self, mapping_id, **kwargs):
+        """Update a mapping.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-a-mapping
+        """
+        post_body = json.dumps({'mapping': kwargs})
+        resp, body = self.patch(
+            'OS-FEDERATION/mappings/%s' % mapping_id, post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_mappings(self, **kwargs):
+        """List mappings.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-mappings
+        """
+        url = 'OS-FEDERATION/mappings'
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_mapping(self, mapping_id):
+        """Delete a mapping.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-mapping
+        """
+        resp, body = self.delete(
+            'OS-FEDERATION/mappings/%s' % mapping_id)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
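
create_mapping forwards its keyword arguments as the 'mapping' object, so the caller supplies the federation rules document itself. A minimal, hypothetical rules structure of the general shape keystone expects is sketched below; the remote attribute and group id are illustrative only:

    # Hypothetical mapping rules: map REMOTE_USER to a local user name and
    # put federated users into one pre-created group.
    example_mapping = {
        'rules': [{
            'local': [
                {'user': {'name': '{0}'}},
                {'group': {'id': 'federated-users-group-id'}},
            ],
            'remote': [
                {'type': 'REMOTE_USER'},
            ],
        }],
    }

    # With a configured MappingsClient this would be sent as:
    #   mappings_client.create_mapping('example-mapping', **example_mapping)
    # which the client wraps into {'mapping': example_mapping}.
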
diff --git a/tempest/lib/services/identity/v3/oauth_token_client.py b/tempest/lib/services/identity/v3/oauth_token_client.py
index 6ca401b..564d6d6 100644
--- a/tempest/lib/services/identity/v3/oauth_token_client.py
+++ b/tempest/lib/services/identity/v3/oauth_token_client.py
@@ -18,9 +18,7 @@
 import hmac
 import random
 import time
-
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from oslo_serialization import jsonutils as json
 
@@ -33,9 +31,9 @@
     def _escape(self, s):
         """Escape a unicode string in an OAuth-compatible fashion."""
         safe = b'~'
-        s = s.encode('utf-8') if isinstance(s, six.text_type) else s
+        s = s.encode('utf-8') if isinstance(s, str) else s
         s = urlparse.quote(s, safe)
-        if isinstance(s, six.binary_type):
+        if isinstance(s, bytes):
             s = s.decode('utf-8')
         return s
 
@@ -47,8 +45,8 @@
                                         verifier=None,
                                         http_method='GET'):
         """Generate OAUTH params along with signature."""
-        timestamp = six.text_type(int(time.time()))
-        nonce = six.text_type(random.getrandbits(64)) + timestamp
+        timestamp = str(int(time.time()))
+        nonce = str(random.getrandbits(64)) + timestamp
         oauth_params = [
             ('oauth_nonce', nonce),
             ('oauth_timestamp', timestamp),
@@ -71,7 +69,7 @@
         normalized_params = '&'.join(parameter_parts)
 
         # normalize_uri
-        scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
+        scheme, netloc, path, params, _, _ = urlparse.urlparse(uri)
         scheme = scheme.lower()
         netloc = netloc.lower()
         path = path.replace('//', '/')
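
With six gone, _escape above is a thin wrapper over the stdlib: OAuth 1.0a percent-encoding is urllib.parse.quote with safe='~' (so '/' gets encoded as OAuth requires), applied to UTF-8 bytes and returned as text. The same idiom standalone:

    from urllib import parse as urlparse


    def oauth_escape(value):
        """Percent-encode a value the way the OAuth client above does."""
        raw = value.encode('utf-8') if isinstance(value, str) else value
        return urlparse.quote(raw, safe=b'~')


    print(oauth_escape('key with spaces & unicode: café'))
    # key%20with%20spaces%20%26%20unicode%3A%20caf%C3%A9
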
diff --git a/tempest/lib/services/identity/v3/policies_client.py b/tempest/lib/services/identity/v3/policies_client.py
index 31c0d18..41def38 100644
--- a/tempest/lib/services/identity/v3/policies_client.py
+++ b/tempest/lib/services/identity/v3/policies_client.py
@@ -185,3 +185,27 @@
         resp, body = self.delete(url)
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
+
+    def list_endpoints_for_policy(self, policy_id):
+        """List policy and service endpoint associations.
+
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/#list-policy-and-service-endpoint-associations
+        """
+        url = "policies/{0}/OS-ENDPOINT-POLICY/endpoints".format(policy_id)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_policy_for_endpoint(self, endpoint_id):
+        """Show the effective policy associated with an endpoint
+
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/#show-the-effective-policy-associated-with-an-endpoint
+        """
+        url = "endpoints/{0}/OS-ENDPOINT-POLICY/policy".format(endpoint_id)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/projects_client.py b/tempest/lib/services/identity/v3/projects_client.py
index b186fba..fffbe7a 100644
--- a/tempest/lib/services/identity/v3/projects_client.py
+++ b/tempest/lib/services/identity/v3/projects_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/protocols_client.py b/tempest/lib/services/identity/v3/protocols_client.py
new file mode 100644
index 0000000..19aa426
--- /dev/null
+++ b/tempest/lib/services/identity/v3/protocols_client.py
@@ -0,0 +1,97 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class ProtocolsClient(rest_client.RestClient):
+
+    def add_protocol_to_identity_provider(self, idp_id, protocol_id,
+                                          **kwargs):
+        """Add protocol to identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#add-protocol-to-identity-provider
+        """
+        post_body = json.dumps({'protocol': kwargs})
+        resp, body = self.put(
+            'OS-FEDERATION/identity_providers/%s/protocols/%s'
+            % (idp_id, protocol_id), post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_protocols_of_identity_provider(self, idp_id, **kwargs):
+        """List protocols of identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-protocols-of-identity-provider
+        """
+        url = 'OS-FEDERATION/identity_providers/%s/protocols' % idp_id
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def get_protocol_for_identity_provider(self, idp_id, protocol_id):
+        """Get protocol for identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-protocol-for-identity-provider
+        """
+        resp, body = self.get(
+            'OS-FEDERATION/identity_providers/%s/protocols/%s'
+            % (idp_id, protocol_id))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_mapping_for_identity_provider(self, idp_id, protocol_id,
+                                             **kwargs):
+        """Update attribute mapping for identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-attribute-mapping-for-identity-provider
+        """
+        post_body = json.dumps({'protocol': kwargs})
+        resp, body = self.patch(
+            'OS-FEDERATION/identity_providers/%s/protocols/%s'
+            % (idp_id, protocol_id), post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_protocol_from_identity_provider(self, idp_id, protocol_id):
+        """Delete a protocol from identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-protocol-from-identity-provider
+        """
+        resp, body = self.delete(
+            'OS-FEDERATION/identity_providers/%s/protocols/%s'
+            % (idp_id, protocol_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/regions_client.py b/tempest/lib/services/identity/v3/regions_client.py
index a598c9c..3aed5b8 100644
--- a/tempest/lib/services/identity/v3/regions_client.py
+++ b/tempest/lib/services/identity/v3/regions_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#regions
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/role_assignments_client.py b/tempest/lib/services/identity/v3/role_assignments_client.py
index 51ee8f6..f615709 100644
--- a/tempest/lib/services/identity/v3/role_assignments_client.py
+++ b/tempest/lib/services/identity/v3/role_assignments_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/roles_client.py b/tempest/lib/services/identity/v3/roles_client.py
index f9356be..4836784 100644
--- a/tempest/lib/services/identity/v3/roles_client.py
+++ b/tempest/lib/services/identity/v3/roles_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
@@ -89,6 +90,13 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def create_user_role_on_system(self, user_id, role_id):
+        """Add roles to a user on the system."""
+        resp, body = self.put('system/users/%s/roles/%s' %
+                              (user_id, role_id), None)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def list_user_roles_on_project(self, project_id, user_id):
         """list roles of a user on a project."""
         resp, body = self.get('projects/%s/users/%s/roles' %
@@ -105,6 +113,13 @@
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
 
+    def list_user_roles_on_system(self, user_id):
+        """list roles of a user on the system."""
+        resp, body = self.get('system/users/%s/roles' % user_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
     def delete_role_from_user_on_project(self, project_id, user_id, role_id):
         """Delete role of a user on a project."""
         resp, body = self.delete('projects/%s/users/%s/roles/%s' %
@@ -119,19 +134,32 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def delete_role_from_user_on_system(self, user_id, role_id):
+        """Delete role of a user on the system."""
+        resp, body = self.delete('system/users/%s/roles/%s' %
+                                 (user_id, role_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def check_user_role_existence_on_project(self, project_id,
                                              user_id, role_id):
         """Check role of a user on a project."""
-        resp, body = self.head('projects/%s/users/%s/roles/%s' %
-                               (project_id, user_id, role_id))
+        resp, _ = self.head('projects/%s/users/%s/roles/%s' %
+                            (project_id, user_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def check_user_role_existence_on_domain(self, domain_id,
                                             user_id, role_id):
         """Check role of a user on a domain."""
-        resp, body = self.head('domains/%s/users/%s/roles/%s' %
-                               (domain_id, user_id, role_id))
+        resp, _ = self.head('domains/%s/users/%s/roles/%s' %
+                            (domain_id, user_id, role_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp)
+
+    def check_user_role_existence_on_system(self, user_id, role_id):
+        """Check role of a user on the system."""
+        resp, _ = self.head('system/users/%s/roles/%s' % (user_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
@@ -149,6 +177,13 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def create_group_role_on_system(self, group_id, role_id):
+        """Add roles to a group on the system."""
+        resp, body = self.put('system/groups/%s/roles/%s' %
+                              (group_id, role_id), None)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def list_group_roles_on_project(self, project_id, group_id):
         """list roles of a group on a project."""
         resp, body = self.get('projects/%s/groups/%s/roles' %
@@ -165,6 +200,13 @@
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
 
+    def list_group_roles_on_system(self, group_id):
+        """list roles of a group on the system."""
+        resp, body = self.get('system/groups/%s/roles' % group_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
     def delete_role_from_group_on_project(self, project_id, group_id, role_id):
         """Delete role of a group on a project."""
         resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
@@ -179,19 +221,33 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def delete_role_from_group_on_system(self, group_id, role_id):
+        """Delete role of a group on the system."""
+        resp, body = self.delete('system/groups/%s/roles/%s' %
+                                 (group_id, role_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def check_role_from_group_on_project_existence(self, project_id,
                                                    group_id, role_id):
         """Check role of a group on a project."""
-        resp, body = self.head('projects/%s/groups/%s/roles/%s' %
-                               (project_id, group_id, role_id))
+        resp, _ = self.head('projects/%s/groups/%s/roles/%s' %
+                            (project_id, group_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def check_role_from_group_on_domain_existence(self, domain_id,
                                                   group_id, role_id):
         """Check role of a group on a domain."""
-        resp, body = self.head('domains/%s/groups/%s/roles/%s' %
-                               (domain_id, group_id, role_id))
+        resp, _ = self.head('domains/%s/groups/%s/roles/%s' %
+                            (domain_id, group_id, role_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp)
+
+    def check_role_from_group_on_system_existence(self, group_id, role_id):
+        """Check role of a group on the system."""
+        resp, _ = self.head('system/groups/%s/roles/%s' %
+                            (group_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
@@ -232,14 +288,14 @@
 
     def check_role_inference_rule(self, prior_role, implies_role):
         """Check a role inference rule."""
-        resp, body = self.head('roles/%s/implies/%s' %
-                               (prior_role, implies_role))
+        resp, _ = self.head('roles/%s/implies/%s' %
+                            (prior_role, implies_role))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def delete_role_inference_rule(self, prior_role, implies_role):
         """Delete a role inference rule."""
-        resp, body = self.delete('roles/%s/implies/%s' %
-                                 (prior_role, implies_role))
+        resp, _ = self.delete('roles/%s/implies/%s' %
+                              (prior_role, implies_role))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
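
The new system-scope role calls parallel the project and domain variants but hang off the /v3/system tree, and the HEAD-based existence checks succeed by returning 204 with no body. A usage sketch assuming a configured RolesClient and pre-existing user and role IDs:

    def grant_system_role(roles_client, user_id, role_id):
        """Grant, verify, list and revoke a system-scoped role (sketch)."""
        roles_client.create_user_role_on_system(user_id, role_id)
        # Raises if keystone does not answer the HEAD with 204.
        roles_client.check_user_role_existence_on_system(user_id, role_id)
        roles = roles_client.list_user_roles_on_system(user_id)['roles']
        roles_client.delete_role_from_user_on_system(user_id, role_id)
        return roles
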
diff --git a/tempest/lib/services/identity/v3/service_providers_client.py b/tempest/lib/services/identity/v3/service_providers_client.py
new file mode 100644
index 0000000..5d4f014
--- /dev/null
+++ b/tempest/lib/services/identity/v3/service_providers_client.py
@@ -0,0 +1,93 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class ServiceProvidersClient(rest_client.RestClient):
+
+    def register_service_provider(self, service_provider_id, **kwargs):
+        """Register a service provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#register-a-service-provider
+        """
+        post_body = json.dumps({'service_provider': kwargs})
+        resp, body = self.put(
+            'OS-FEDERATION/service_providers/%s' % service_provider_id,
+            post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_service_providers(self, **kwargs):
+        """List service providers.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-service-providers
+        """
+        url = 'OS-FEDERATION/service_providers'
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def get_service_provider(self, service_provider_id):
+        """Get a service provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-service-provider
+        """
+        resp, body = self.get(
+            'OS-FEDERATION/service_providers/%s' % service_provider_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_service_provider(self, service_provider_id):
+        """Delete a service provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-service-provider
+        """
+        resp, body = self.delete(
+            'OS-FEDERATION/service_providers/%s' % service_provider_id)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_service_provider(self, service_provider_id, **kwargs):
+        """Update a service provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-service-provider
+        """
+        post_body = json.dumps({'service_provider': kwargs})
+        resp, body = self.patch(
+            'OS-FEDERATION/service_providers/%s' % service_provider_id,
+            post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/services_client.py b/tempest/lib/services/identity/v3/services_client.py
index eb961a5..994df2f 100644
--- a/tempest/lib/services/identity/v3/services_client.py
+++ b/tempest/lib/services/identity/v3/services_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/token_client.py b/tempest/lib/services/identity/v3/token_client.py
index 6956297..c63966a 100644
--- a/tempest/lib/services/identity/v3/token_client.py
+++ b/tempest/lib/services/identity/v3/token_client.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
@@ -51,7 +50,7 @@
     def auth(self, user_id=None, username=None, password=None, project_id=None,
              project_name=None, user_domain_id=None, user_domain_name=None,
              project_domain_id=None, project_domain_name=None, domain_id=None,
-             domain_name=None, token=None, app_cred_id=None,
+             domain_name=None, system=None, token=None, app_cred_id=None,
              app_cred_secret=None):
         """Obtains a token from the authentication service
 
@@ -65,6 +64,7 @@
         :param domain_name: a domain name to scope to
         :param project_id: a project id to scope to
         :param project_name: a project name to scope to
+        :param system: whether the token should be scoped to the system
         :param token: a token to re-scope.
 
         Accepts different combinations of credentials.
@@ -74,6 +74,7 @@
         - user_id, password
         - username, password, user_domain_id
         - username, password, project_name, user_domain_id, project_domain_id
+        - username, password, user_domain_id, system
         Validation is left to the server side.
         """
         creds = {
@@ -135,6 +136,8 @@
             creds['auth']['scope'] = dict(domain={'id': domain_id})
         elif domain_name:
             creds['auth']['scope'] = dict(domain={'name': domain_name})
+        elif system:
+            creds['auth']['scope'] = dict(system={system: True})
 
         body = json.dumps(creds, sort_keys=True)
         resp, body = self.post(self.auth_url, body=body)
@@ -191,15 +194,3 @@
             return token, body['token']
         else:
             return token
-
-
-class V3TokenClientJSON(V3TokenClient):
-    LOG = logging.getLogger(__name__)
-
-    def _warn(self):
-        self.LOG.warning("%s class was deprecated and renamed to %s",
-                         self.__class__.__name__, 'V3TokenClient')
-
-    def __init__(self, *args, **kwargs):
-        self._warn()
-        super(V3TokenClientJSON, self).__init__(*args, **kwargs)
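
The new system argument only changes the scope section of the token request; keystone represents system scope as {'system': {'all': true}}, which is what the elif branch above produces when callers pass system='all'. A standalone sketch of the resulting request body (user values are placeholders):

    import json

    system = 'all'
    creds = {
        'auth': {
            'identity': {
                'methods': ['password'],
                'password': {
                    'user': {
                        'name': 'operator',            # placeholder
                        'domain': {'id': 'default'},
                        'password': 'secret',          # placeholder
                    },
                },
            },
            'scope': {'system': {system: True}},
        },
    }
    print(json.dumps(creds, sort_keys=True))
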
diff --git a/tempest/lib/services/identity/v3/trusts_client.py b/tempest/lib/services/identity/v3/trusts_client.py
index f1cc806..48a7956 100644
--- a/tempest/lib/services/identity/v3/trusts_client.py
+++ b/tempest/lib/services/identity/v3/trusts_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/users_client.py b/tempest/lib/services/identity/v3/users_client.py
index f47730f..771ffea 100644
--- a/tempest/lib/services/identity/v3/users_client.py
+++ b/tempest/lib/services/identity/v3/users_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
@@ -118,3 +119,30 @@
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
+
+    def create_user_ec2_credential(self, user_id, **kwargs):
+        post_body = json.dumps(kwargs)
+        resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
+                               post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_user_ec2_credential(self, user_id, access):
+        resp, body = self.delete('/users/%s/credentials/OS-EC2/%s' %
+                                 (user_id, access))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_user_ec2_credentials(self, user_id):
+        resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_user_ec2_credential(self, user_id, access):
+        resp, body = self.get('/users/%s/credentials/OS-EC2/%s' %
+                              (user_id, access))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
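
The EC2 credential helpers talk to the /v3/users/{user_id}/credentials/OS-EC2 resource; keystone expects the owning project under the 'tenant_id' key and hands back an access/secret pair. A usage sketch assuming a configured UsersClient and existing user and project IDs:

    def issue_ec2_credential(users_client, user_id, project_id):
        """Create, show and delete an EC2 credential for a user (sketch)."""
        created = users_client.create_user_ec2_credential(
            user_id, tenant_id=project_id)['credential']
        access = created['access']
        shown = users_client.show_user_ec2_credential(user_id, access)
        users_client.delete_user_ec2_credential(user_id, access)
        return shown['credential']
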
diff --git a/tempest/lib/services/image/v1/__init__.py b/tempest/lib/services/image/v1/__init__.py
index 9bd8262..1f33cef 100644
--- a/tempest/lib/services/image/v1/__init__.py
+++ b/tempest/lib/services/image/v1/__init__.py
@@ -12,8 +12,17 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+import warnings
+
 from tempest.lib.services.image.v1.image_members_client import \
     ImageMembersClient
 from tempest.lib.services.image.v1.images_client import ImagesClient
 
 __all__ = ['ImageMembersClient', 'ImagesClient']
+
+
+warnings.warn(
+    "The tempest.lib.services.image.v1 module (Image v1 APIs service "
+    "clients) is deprecated in favor of tempest.lib.services.image.v2 "
+    "(Image v2 APIs service clients) and will be removed once Tempest stop "
+    "supporting stable Ussuri.", DeprecationWarning)
diff --git a/tempest/lib/services/image/v1/images_client.py b/tempest/lib/services/image/v1/images_client.py
index 0e76a63..c9a4a94 100644
--- a/tempest/lib/services/image/v1/images_client.py
+++ b/tempest/lib/services/image/v1/images_client.py
@@ -14,9 +14,9 @@
 #    under the License.
 
 import functools
+from urllib import parse as urllib
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index 90778da..abf427c 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -14,9 +14,9 @@
 #    under the License.
 
 import functools
+from urllib import parse as urllib
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
@@ -121,6 +121,14 @@
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
 
+    def show_image_tasks(self, image_id):
+        """Show image tasks."""
+        url = 'images/%s/tasks' % image_id
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
     def is_resource_deleted(self, id):
         try:
             self.show_image(id)
@@ -128,6 +136,15 @@
             return True
         return False
 
+    def is_resource_active(self, id):
+        try:
+            image = self.show_image(id)
+            if image['status'] != 'active':
+                return False
+        except lib_exc.NotFound:
+            return False
+        return True
+
     @property
     def resource_type(self):
         """Returns the primary type of resource this client works with."""
@@ -152,6 +169,83 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def stage_image_file(self, image_id, data):
+        """Upload binary image data to staging area.
+
+        For a full list of available parameters, please refer to the official
+        API reference (stage API):
+        https://docs.openstack.org/api-ref/image/v2/#interoperable-image-import
+        """
+        url = 'images/%s/stage' % image_id
+
+        # We are going to do chunked transfer, so split the input data
+        # into fixed-size chunks.
+        headers = {'Content-Type': 'application/octet-stream'}
+        data = iter(functools.partial(data.read, CHUNKSIZE), b'')
+
+        resp, body = self.request('PUT', url, headers=headers,
+                                  body=data, chunked=True)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def info_import(self):
+        """Return information about server-supported import methods."""
+        url = 'info/import'
+        resp, body = self.get(url)
+
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def info_stores(self):
+        """Return information about server-supported stores."""
+        url = 'info/stores'
+        resp, body = self.get(url)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def image_import(self, image_id, method='glance-direct',
+                     all_stores_must_succeed=None, all_stores=True,
+                     stores=None, image_uri=None):
+        """Import data from staging area to glance store.
+
+        For a full list of available parameters, please refer to the official
+        API reference (import API):
+        https://docs.openstack.org/api-ref/image/v2/#interoperable-image-import
+
+        :param method: The import method (i.e. glance-direct) to use
+        :param all_stores_must_succeed: Boolean indicating if all store imports
+                                        must succeed for the import to be
+                                        considered successful. Must be None if
+                                        server does not support multistore.
+        :param all_stores: Boolean indicating if image should be imported to
+                           all available stores (incompatible with stores)
+        :param stores: A list of destination store names for the import. Must
+                       be None if server does not support multistore.
+        :param image_uri: A URL to be used with the web-download method
+        """
+        url = 'images/%s/import' % image_id
+        data = {
+            "method": {
+                "name": method
+            },
+        }
+        if stores is not None:
+            data["stores"] = stores
+        else:
+            data["all_stores"] = all_stores
+
+        if all_stores_must_succeed is not None:
+            data['all_stores_must_succeed'] = all_stores_must_succeed
+        if image_uri:
+            data['method']['uri'] = image_uri
+        data = json.dumps(data)
+        headers = {'Content-Type': 'application/json'}
+        resp, _ = self.post(url, data, headers=headers)
+
+        self.expected_success(202, resp.status)
+        return rest_client.ResponseBody(resp)
+
     def show_image_file(self, image_id):
         """Download binary image data.
 
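
stage_image_file streams the image data with chunked transfer encoding by turning the file object's read method into an iterator of fixed-size chunks via iter(callable, sentinel). The same idiom standalone, using an in-memory file and an illustrative chunk size rather than the module's CHUNKSIZE constant:

    import functools
    import io

    CHUNKSIZE = 64 * 1024   # illustrative value only

    data = io.BytesIO(b'x' * 200000)

    # data.read(CHUNKSIZE) is called repeatedly until it returns b'',
    # yielding one chunk at a time instead of reading everything at once.
    chunks = iter(functools.partial(data.read, CHUNKSIZE), b'')

    print(sum(len(chunk) for chunk in chunks))   # 200000
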
diff --git a/tempest/lib/services/image/v2/namespace_objects_client.py b/tempest/lib/services/image/v2/namespace_objects_client.py
index 0cae816..32f5a2c 100644
--- a/tempest/lib/services/image/v2/namespace_objects_client.py
+++ b/tempest/lib/services/image/v2/namespace_objects_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/image/v2/namespace_tags_client.py b/tempest/lib/services/image/v2/namespace_tags_client.py
index 4315f16..5bca229 100644
--- a/tempest/lib/services/image/v2/namespace_tags_client.py
+++ b/tempest/lib/services/image/v2/namespace_tags_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/image/v2/versions_client.py b/tempest/lib/services/image/v2/versions_client.py
index 1b7f806..98b4fb6 100644
--- a/tempest/lib/services/image/v2/versions_client.py
+++ b/tempest/lib/services/image/v2/versions_client.py
@@ -30,3 +30,13 @@
         self.expected_success(300, resp.status)
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
+
+    def has_version(self, version):
+        """Return True if a version is supported."""
+        version = 'v%s' % version
+        supported = ['SUPPORTED', 'CURRENT']
+        versions = self.list_versions()
+        for version_struct in versions['versions']:
+            if version_struct['id'] == version:
+                return version_struct['status'] in supported
+        return False
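
has_version simply scans the versions document and treats SUPPORTED and CURRENT as usable. The same check against a hand-written sample payload (trimmed to the fields the method actually reads):

    def has_version(versions_doc, version):
        """Return True if 'v<version>' is SUPPORTED or CURRENT."""
        wanted = 'v%s' % version
        for version_struct in versions_doc['versions']:
            if version_struct['id'] == wanted:
                return version_struct['status'] in ('SUPPORTED', 'CURRENT')
        return False


    sample = {'versions': [
        {'id': 'v2.9', 'status': 'CURRENT'},
        {'id': 'v2.7', 'status': 'SUPPORTED'},
        {'id': 'v1.1', 'status': 'DEPRECATED'},
    ]}
    print(has_version(sample, '2.9'), has_version(sample, '1.1'))  # True False
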
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index f7ac046..98d7482 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -15,6 +15,11 @@
 from tempest.lib.services.network.agents_client import AgentsClient
 from tempest.lib.services.network.extensions_client import ExtensionsClient
 from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
+from tempest.lib.services.network.floating_ips_port_forwarding_client import \
+    FloatingIpsPortForwardingClient
+from tempest.lib.services.network.log_resource_client import LogResourceClient
+from tempest.lib.services.network.loggable_resource_client import \
+    LoggableResourceClient
 from tempest.lib.services.network.metering_label_rules_client import \
     MeteringLabelRulesClient
 from tempest.lib.services.network.metering_labels_client import \
@@ -22,6 +27,8 @@
 from tempest.lib.services.network.networks_client import NetworksClient
 from tempest.lib.services.network.ports_client import PortsClient
 from tempest.lib.services.network.qos_client import QosClient
+from tempest.lib.services.network.qos_limit_bandwidth_rules_client import \
+    QosLimitBandwidthRulesClient
 from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
     QosMinimumBandwidthRulesClient
 from tempest.lib.services.network.quotas_client import QuotasClient
@@ -36,12 +43,15 @@
 from tempest.lib.services.network.subnetpools_client import SubnetpoolsClient
 from tempest.lib.services.network.subnets_client import SubnetsClient
 from tempest.lib.services.network.tags_client import TagsClient
+from tempest.lib.services.network.trunks_client import TrunksClient
 from tempest.lib.services.network.versions_client import NetworkVersionsClient
 
 __all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
-           'MeteringLabelRulesClient', 'MeteringLabelsClient',
-           'NetworksClient', 'NetworkVersionsClient', 'PortsClient',
-           'QosClient', 'QosMinimumBandwidthRulesClient', 'QuotasClient',
-           'RoutersClient', 'SecurityGroupRulesClient', 'SecurityGroupsClient',
+           'FloatingIpsPortForwardingClient', 'MeteringLabelRulesClient',
+           'MeteringLabelsClient', 'NetworksClient', 'NetworkVersionsClient',
+           'PortsClient', 'QosClient', 'QosMinimumBandwidthRulesClient',
+           'QosLimitBandwidthRulesClient', 'QuotasClient', 'RoutersClient',
+           'SecurityGroupRulesClient', 'SecurityGroupsClient',
            'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
-           'SubnetsClient', 'TagsClient']
+           'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
+           'LoggableResourceClient']
diff --git a/tempest/lib/services/network/base.py b/tempest/lib/services/network/base.py
index fe8b244..ee87dd4 100644
--- a/tempest/lib/services/network/base.py
+++ b/tempest/lib/services/network/base.py
@@ -10,8 +10,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/network/floating_ips_port_forwarding_client.py b/tempest/lib/services/network/floating_ips_port_forwarding_client.py
new file mode 100644
index 0000000..43e24ea
--- /dev/null
+++ b/tempest/lib/services/network/floating_ips_port_forwarding_client.py
@@ -0,0 +1,78 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class FloatingIpsPortForwardingClient(base.BaseNetworkClient):
+
+    def create_port_forwarding(self, floatingip_id, **kwargs):
+        """Creates a floating IP port forwarding.
+
+        Creates port forwarding by using the configuration that you define in
+        the request object.
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-port-forwarding
+        """
+        uri = '/floatingips/%s/port_forwardings' % floatingip_id
+        post_data = {'port_forwarding': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_port_forwarding(
+            self, floatingip_id, port_forwarding_id, **kwargs):
+        """Updates a floating IP port_forwarding resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-a-port-forwarding
+        """
+        uri = '/floatingips/%s/port_forwardings/%s' % (
+            floatingip_id, port_forwarding_id)
+        post_data = {'port_forwarding': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_port_forwarding(
+            self, floatingip_id, port_forwarding_id, **fields):
+        """Shows details for a floating IP port forwarding id.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-port-forwarding
+        """
+        uri = '/floatingips/%s/port_forwardings/%s' % (
+            floatingip_id, port_forwarding_id)
+        return self.show_resource(uri, **fields)
+
+    def delete_port_forwarding(self, floatingip_id, port_forwarding_id):
+        """Deletes a floating IP port_forwarding resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-a-floating-ip-port-forwarding
+        """
+        uri = '/floatingips/%s/port_forwardings/%s' % (
+            floatingip_id, port_forwarding_id)
+        return self.delete_resource(uri)
+
+    def list_port_forwardings(self, floatingip_id, **filters):
+        """Lists floating Ip port forwardings.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ip-port-forwardings-detail
+        """
+        uri = '/floatingips/%s/port_forwardings' % floatingip_id
+        return self.list_resources(uri, **filters)
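A minimal usage sketch for the new FloatingIpsPortForwardingClient (not part of the change): it assumes an already-built auth_provider plus existing floating IP and internal port UUIDs, and the field names are taken from the Neutron port forwarding API reference.

from tempest.lib.services.network import floating_ips_port_forwarding_client

# Assumed to exist: auth_provider, fip_id and internal_port_id.
pf_client = floating_ips_port_forwarding_client.FloatingIpsPortForwardingClient(
    auth_provider, 'network', 'RegionOne')

# Forward TCP :8080 on the floating IP to port 80 of the internal port.
pf = pf_client.create_port_forwarding(
    fip_id,
    protocol='tcp',
    external_port=8080,
    internal_port=80,
    internal_ip_address='10.0.0.10',
    internal_port_id=internal_port_id)['port_forwarding']

pf_client.list_port_forwardings(fip_id)
pf_client.delete_port_forwarding(fip_id, pf['id'])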
diff --git a/tempest/lib/services/network/log_resource_client.py b/tempest/lib/services/network/log_resource_client.py
new file mode 100644
index 0000000..727b138
--- /dev/null
+++ b/tempest/lib/services/network/log_resource_client.py
@@ -0,0 +1,74 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class LogResourceClient(base.BaseNetworkClient):
+
+    def create_log(self, **kwargs):
+        """Creates a log resource.
+
+        Creates a log resource by using the configuration that you define in
+        the request object.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-log
+        """
+        uri = '/log/logs/'
+        post_data = {'log': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_log(self, log_id, **kwargs):
+        """Updates a log resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-log
+        """
+        uri = '/log/logs/%s' % log_id
+        post_data = {'log': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_log(self, log_id, **fields):
+        """Shows details for a log id.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-log
+        """
+        uri = '/log/logs/%s' % log_id
+        return self.show_resource(uri, **fields)
+
+    def delete_log(self, log_id):
+        """Deletes a log resource.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-log
+        """
+        uri = '/log/logs/%s' % log_id
+        return self.delete_resource(uri)
+
+    def list_logs(self, **filters):
+        """Lists Logs.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-logs
+        """
+        uri = '/log/logs'
+        return self.list_resources(uri, **filters)
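A minimal sketch of the new LogResourceClient (not part of the change), assuming an admin-scoped auth_provider and an existing security group; the create_log fields follow the Neutron logging API reference.

from tempest.lib.services.network import log_resource_client

# Assumed to exist: admin_auth_provider and a security group id (sg_id).
logs_client = log_resource_client.LogResourceClient(
    admin_auth_provider, 'network', 'RegionOne')

log = logs_client.create_log(
    name='tempest-sg-log',
    resource_type='security_group',
    resource_id=sg_id,
    event='ALL',
    enabled=True)['log']

logs_client.update_log(log['id'], enabled=False)
logs_client.show_log(log['id'])
logs_client.delete_log(log['id'])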
diff --git a/tempest/lib/services/network/loggable_resource_client.py b/tempest/lib/services/network/loggable_resource_client.py
new file mode 100644
index 0000000..774046f
--- /dev/null
+++ b/tempest/lib/services/network/loggable_resource_client.py
@@ -0,0 +1,29 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class LoggableResourceClient(base.BaseNetworkClient):
+
+    def list_loggable_resources(self, **filters):
+        """List Loggable resources.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-loggable-resources
+        """
+        uri = '/log/loggable-resources'
+        return self.list_resources(uri, **filters)
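The loggable-resources API is read-only, so the client exposes a single call. A small sketch, assuming an admin-scoped auth_provider:

from tempest.lib.services.network import loggable_resource_client

# Assumed to exist: admin_auth_provider.
loggable_client = loggable_resource_client.LoggableResourceClient(
    admin_auth_provider, 'network', 'RegionOne')

# Expected shape (per the Neutron logging API): a list of resource types,
# e.g. {'loggable_resources': [{'type': 'security_group'}]}.
types = loggable_client.list_loggable_resources()['loggable_resources']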
diff --git a/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py b/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py
new file mode 100644
index 0000000..8fd87fe
--- /dev/null
+++ b/tempest/lib/services/network/qos_limit_bandwidth_rules_client.py
@@ -0,0 +1,74 @@
+# Copyright 2021 Red Hat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosLimitBandwidthRulesClient(base.BaseNetworkClient):
+
+    def create_limit_bandwidth_rule(self, qos_policy_id, **kwargs):
+        """Creates a limit bandwidth rule for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules'.format(
+            qos_policy_id)
+        post_data = {'bandwidth_limit_rule': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_limit_bandwidth_rule(self, qos_policy_id, rule_id, **kwargs):
+        """Updates a limit bandwidth rule.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        post_data = {'bandwidth_limit_rule': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_limit_bandwidth_rule(self, qos_policy_id, rule_id, **fields):
+        """Show details of a limit bandwidth rule.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-bandwidth-limit-rule-details
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        return self.show_resource(uri, **fields)
+
+    def delete_limit_bandwidth_rule(self, qos_policy_id, rule_id):
+        """Deletes a limit bandwidth rule for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-bandwidth-limit-rule
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules/{}'.format(
+            qos_policy_id, rule_id)
+        return self.delete_resource(uri)
+
+    def list_limit_bandwidth_rules(self, qos_policy_id, **filters):
+        """Lists all limit bandwidth rules for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-bandwidth-limit-rules-for-qos-policy
+        """
+        uri = '/qos/policies/{}/bandwidth_limit_rules'.format(qos_policy_id)
+        return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py b/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py
new file mode 100644
index 0000000..98bcafe
--- /dev/null
+++ b/tempest/lib/services/network/qos_minimum_packet_rate_rules_client.py
@@ -0,0 +1,73 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class QosMinimumPacketRateRulesClient(base.BaseNetworkClient):
+
+    def create_minimum_packet_rate_rule(self, qos_policy_id, **kwargs):
+        """Creates a minimum packet rate rule for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules' % qos_policy_id
+        post_data = {'minimum_packet_rate_rule': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_minimum_packet_rate_rule(
+        self, qos_policy_id, rule_id, **kwargs
+    ):
+        """Updates a minimum packet rate rule.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        post_data = {'minimum_packet_rate_rule': kwargs}
+        return self.update_resource(uri, post_data)
+
+    def show_minimum_packet_rate_rule(self, qos_policy_id, rule_id, **fields):
+        """Show details of a minimum packet rate rule.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-minimum-packet-rate-rule-details
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        return self.show_resource(uri, **fields)
+
+    def delete_minimum_packet_rate_rule(self, qos_policy_id, rule_id):
+        """Deletes a minimum packet rate rule for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-minimum-packet-rate-rule
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules/%s' % (
+            qos_policy_id, rule_id)
+        return self.delete_resource(uri)
+
+    def list_minimum_packet_rate_rules(self, qos_policy_id, **filters):
+        """Lists all minimum packet rate rules for a QoS policy.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-minimum-packet-rate-rules-for-qos-policy
+        """
+        uri = '/qos/policies/%s/minimum_packet_rate_rules' % qos_policy_id
+        return self.list_resources(uri, **filters)
diff --git a/tempest/lib/services/network/quotas_client.py b/tempest/lib/services/network/quotas_client.py
index 997d201..96cc65d 100644
--- a/tempest/lib/services/network/quotas_client.py
+++ b/tempest/lib/services/network/quotas_client.py
@@ -12,11 +12,33 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import functools
+
 from tempest.lib.services.network import base
 
 
+def _warning_deprecate_tenant_id(func):
+    @functools.wraps(func)
+    def inner(*args, **kwargs):
+        _self = args[0]
+        # Check the length of args to know whether 'tenant_id' was passed
+        # as a positional arg or as a keyword arg.
+        if len(args) < 2:
+            if 'tenant_id' in kwargs:
+                _self.LOG.warning(
+                    'the arg name "tenant_id" is deprecated for removal, '
+                    'please start using "project_id" instead')
+            elif 'project_id' in kwargs:
+                # Fall back to the deprecated name until the rename completes.
+                kwargs['tenant_id'] = kwargs.pop('project_id')
+
+        return func(*args, **kwargs)
+    return inner
+
+
 class QuotasClient(base.BaseNetworkClient):
 
+    @_warning_deprecate_tenant_id
     def update_quotas(self, tenant_id, **kwargs):
         """Update quota for a project.
 
@@ -28,12 +50,14 @@
         uri = '/quotas/%s' % tenant_id
         return self.update_resource(uri, put_body)
 
+    @_warning_deprecate_tenant_id
     def reset_quotas(self, tenant_id):  # noqa
         # NOTE: This noqa is for passing T111 check and we cannot rename
         #       to keep backwards compatibility.
         uri = '/quotas/%s' % tenant_id
         return self.delete_resource(uri)
 
+    @_warning_deprecate_tenant_id
     def show_quotas(self, tenant_id, **fields):
         """Show quota for a project.
 
@@ -54,11 +78,13 @@
         uri = '/quotas'
         return self.list_resources(uri, **filters)
 
+    @_warning_deprecate_tenant_id
     def show_default_quotas(self, tenant_id):
         """List default quotas for a project."""
         uri = '/quotas/%s/default' % tenant_id
         return self.show_resource(uri)
 
+    @_warning_deprecate_tenant_id
     def show_quota_details(self, tenant_id):
         """Show quota details for a project."""
         uri = '/quotas/%s/details.json' % tenant_id
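The decorator keeps the positional 'tenant_id' parameter working while letting callers migrate to the 'project_id' keyword. A short sketch of the three call styles it distinguishes, assuming an admin-scoped auth_provider and a project UUID:

from tempest.lib.services.network import quotas_client

# Assumed to exist: admin_auth_provider and a project uuid (project_id).
quotas = quotas_client.QuotasClient(
    admin_auth_provider, 'network', 'RegionOne')

# Positional call: unchanged behaviour, no warning.
quotas.show_quotas(project_id)

# New keyword: 'project_id' is mapped back to the still-positional
# 'tenant_id' parameter by the decorator.
quotas.show_quotas(project_id=project_id)

# Old keyword: still works, but logs the deprecation warning.
quotas.update_quotas(tenant_id=project_id, network=100)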
diff --git a/tempest/lib/services/network/trunks_client.py b/tempest/lib/services/network/trunks_client.py
new file mode 100644
index 0000000..2fd9e01
--- /dev/null
+++ b/tempest/lib/services/network/trunks_client.py
@@ -0,0 +1,100 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class TrunksClient(base.BaseNetworkClient):
+
+    def create_trunk(self, **kwargs):
+        """Creates a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-trunk
+        """
+        uri = '/trunks'
+        post_data = {'trunk': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_trunk(self, trunk_id, **kwargs):
+        """Updates a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-trunk
+        """
+        uri = '/trunks/%s' % trunk_id
+        put_data = {'trunk': kwargs}
+        return self.update_resource(uri, put_data)
+
+    def show_trunk(self, trunk_id):
+        """Shows details for a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-trunk
+        """
+        uri = '/trunks/%s' % trunk_id
+        return self.show_resource(uri)
+
+    def delete_trunk(self, trunk_id):
+        """Deletes a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-trunk
+        """
+        uri = '/trunks/%s' % trunk_id
+        return self.delete_resource(uri)
+
+    def list_trunks(self, **filters):
+        """Lists trunks to which the tenant has access.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-trunks
+        """
+        uri = '/trunks'
+        return self.list_resources(uri, **filters)
+
+    def add_subports_to_trunk(self, trunk_id, sub_ports):
+        """Add subports to a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#add-subports-to-trunk
+        """
+        uri = '/trunks/%s/add_subports' % trunk_id
+        put_data = {'sub_ports': sub_ports}
+        return self.update_resource(uri, put_data)
+
+    def delete_subports_from_trunk(self, trunk_id, sub_ports):
+        """Deletes subports from a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-subports-from-trunk
+        """
+        uri = '/trunks/%s/remove_subports' % trunk_id
+        put_data = {'sub_ports': sub_ports}
+        return self.update_resource(uri, put_data)
+
+    def list_subports_of_trunk(self, trunk_id):
+        """List subports of a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-subports-for-trunk
+        """
+        uri = '/trunks/%s/get_subports' % trunk_id
+        return self.list_resources(uri)
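A minimal trunk lifecycle sketch (not part of the change), assuming an auth_provider, a parent port and a subport; the subport entries follow the Neutron trunk API format.

from tempest.lib.services.network import trunks_client

# Assumed to exist: auth_provider, parent_port_id and subport_id.
trunks = trunks_client.TrunksClient(auth_provider, 'network', 'RegionOne')

trunk = trunks.create_trunk(port_id=parent_port_id,
                            name='tempest-trunk')['trunk']

trunks.add_subports_to_trunk(
    trunk['id'],
    [{'port_id': subport_id,
      'segmentation_type': 'vlan',
      'segmentation_id': 100}])

trunks.list_subports_of_trunk(trunk['id'])
trunks.delete_subports_from_trunk(trunk['id'], [{'port_id': subport_id}])
trunks.delete_trunk(trunk['id'])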
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 8c15a88..52b2534 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -13,10 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
 from xml.etree import ElementTree as etree
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index 027fb1f..6d07ec1 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -13,17 +13,25 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
 from xml.etree import ElementTree as etree
 
 import debtcollector.moves
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
+from tempest.lib import exceptions
 
 
 class ContainerClient(rest_client.RestClient):
 
+    def is_resource_deleted(self, container):
+        try:
+            self.list_container_metadata(container)
+        except exceptions.NotFound:
+            return True
+        return False
+
     def update_container(self, container_name, **headers):
         """Creates or Updates a container
 
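With is_resource_deleted() in place, callers can poll for container deletion. A minimal sketch using only the standard library; it assumes a configured ContainerClient instance and a container whose objects have already been removed.

import time

# Assumed to exist: container_client (ContainerClient) and container_name.
container_client.delete_container(container_name)

deadline = time.time() + 60
while not container_client.is_resource_deleted(container_name):
    if time.time() > deadline:
        raise RuntimeError('container %s was not deleted in time'
                           % container_name)
    time.sleep(2)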
diff --git a/tempest/lib/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
index 383aff6..65e8227 100644
--- a/tempest/lib/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -12,9 +12,10 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import ssl
 
-from six.moves import http_client as httplib
-from six.moves.urllib import parse as urlparse
+from http import client as httplib
+from urllib import parse as urlparse
 
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions
@@ -22,6 +23,15 @@
 
 class ObjectClient(rest_client.RestClient):
 
+    def is_resource_deleted(self, object_name, container):
+        try:
+            self.get_object(container, object_name)
+        except exceptions.NotFound:
+            return True
+        except exceptions.Conflict:
+            return False
+        return False
+
     def create_object(self, container, object_name, data,
                       params=None, metadata=None, headers=None,
                       chunked=False):
@@ -118,7 +128,7 @@
         path = str(parsed.path) + "/"
         path += "%s/%s" % (str(container), str(object_name))
 
-        conn = _create_connection(parsed)
+        conn = self._create_connection(parsed)
         # Send the PUT request and the headers including the "Expect" header
         conn.putrequest('PUT', path)
 
@@ -151,15 +161,20 @@
 
         return resp.status, resp.reason
 
+    def _create_connection(self, parsed_url):
+        """Helper function to create connection with httplib
 
-def _create_connection(parsed_url):
-    """Helper function to create connection with httplib
+        :param parsed_url: parsed url of the remote location
+        """
+        context = None
+        # If CONF.identity.disable_ssl_certificate_validation is true,
+        # do not verify the SSL certificate.
+        if self.dscv:
+            context = ssl._create_unverified_context()
+        if parsed_url.scheme == 'https':
+            conn = httplib.HTTPSConnection(parsed_url.netloc,
+                                           context=context)
+        else:
+            conn = httplib.HTTPConnection(parsed_url.netloc)
 
-    :param parsed_url: parsed url of the remote location
-    """
-    if parsed_url.scheme == 'https':
-        conn = httplib.HTTPSConnection(parsed_url.netloc)
-    else:
-        conn = httplib.HTTPConnection(parsed_url.netloc)
-
-    return conn
+        return conn
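Making the connection helper an instance method lets it honour the client's disable_ssl_certificate_validation setting (self.dscv). A small sketch of how that flag reaches it, assuming an auth_provider; with the flag set, HTTPS requests sent through the raw http.client path above skip certificate verification.

from tempest.lib.services.object_storage import object_client

# Assumed to exist: auth_provider. dscv is populated from this constructor
# argument, so the unverified SSL context above is only used when the
# deployment explicitly disables certificate validation.
obj_client = object_client.ObjectClient(
    auth_provider, 'object-store', 'RegionOne',
    disable_ssl_certificate_validation=True)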
diff --git a/tempest/lib/services/placement/__init__.py b/tempest/lib/services/placement/__init__.py
index 5c20c57..daeaeab 100644
--- a/tempest/lib/services/placement/__init__.py
+++ b/tempest/lib/services/placement/__init__.py
@@ -14,5 +14,7 @@
 
 from tempest.lib.services.placement.placement_client import \
     PlacementClient
+from tempest.lib.services.placement.resource_providers_client import \
+    ResourceProvidersClient
 
-__all__ = ['PlacementClient']
+__all__ = ['PlacementClient', 'ResourceProvidersClient']
diff --git a/tempest/lib/services/placement/placement_client.py b/tempest/lib/services/placement/placement_client.py
index b8e91b8..216ac08 100644
--- a/tempest/lib/services/placement/placement_client.py
+++ b/tempest/lib/services/placement/placement_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 from tempest.lib.services.placement import base_placement_client
diff --git a/tempest/lib/services/placement/resource_providers_client.py b/tempest/lib/services/placement/resource_providers_client.py
new file mode 100644
index 0000000..3214053
--- /dev/null
+++ b/tempest/lib/services/placement/resource_providers_client.py
@@ -0,0 +1,123 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.placement import base_placement_client
+
+
+class ResourceProvidersClient(base_placement_client.BasePlacementClient):
+    """Client class for resource provider related methods
+
+    This client class aims to support read-only API operations for resource
+    providers. The following resources are supported:
+    * resource providers
+    * resource provider inventories
+    * resource provider aggregates
+    * resource provider usages
+    """
+
+    def list_resource_providers(self, **params):
+        """List resource providers.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-providers
+        """
+        url = '/resource_providers'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_resource_provider(self, rp_uuid):
+        """Show resource provider.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#show-resource-provider
+        """
+        url = '/resource_providers/%s' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_inventories(self, rp_uuid):
+        """List resource provider inventories.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-provider-inventories
+        """
+        url = '/resource_providers/%s/inventories' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_usages(self, rp_uuid):
+        """List resource provider usages.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-provider-usages
+        """
+        url = '/resource_providers/%s/usages' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_aggregates(self, rp_uuid):
+        """List resource provider aggregates.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-provider-aggregates
+        """
+        url = '/resource_providers/%s/aggregates' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_resource_providers_inventories(self, rp_uuid, **kwargs):
+        """Update resource providers inventories.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#update-resource-provider-inventories
+        """
+        url = '/resource_providers/{}/inventories'.format(rp_uuid)
+        data = json.dumps(kwargs)
+        resp, body = self.put(url, data)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_resource_providers_inventories(self, rp_uuid):
+        """Delete resource providers inventories.
+
+        For full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#delete-resource-provider-inventories
+        """
+        url = '/resource_providers/{}/inventories'.format(rp_uuid)
+        resp, body = self.delete(url)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
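A read-mostly usage sketch for the new placement client (not part of the change), assuming an admin-scoped auth_provider; the inventory payload and the 'uuid'/'generation' fields follow the placement API reference and are illustrative only.

from tempest.lib.services.placement import resource_providers_client

# Assumed to exist: admin_auth_provider.
rp_client = resource_providers_client.ResourceProvidersClient(
    admin_auth_provider, 'placement', 'RegionOne')

rps = rp_client.list_resource_providers()['resource_providers']
rp_uuid = rps[0]['uuid']

rp_client.list_resource_provider_inventories(rp_uuid)
rp_client.list_resource_provider_usages(rp_uuid)
rp_client.list_resource_provider_aggregates(rp_uuid)

# Replacing inventories requires the provider's current generation.
rp = rp_client.show_resource_provider(rp_uuid)
rp_client.update_resource_providers_inventories(
    rp_uuid,
    resource_provider_generation=rp['generation'],
    inventories={'VCPU': {'total': 8}})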
diff --git a/tempest/lib/services/volume/__init__.py b/tempest/lib/services/volume/__init__.py
index 6855d8e..4b47251 100644
--- a/tempest/lib/services/volume/__init__.py
+++ b/tempest/lib/services/volume/__init__.py
@@ -12,8 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-from tempest.lib.services.volume import v1
 from tempest.lib.services.volume import v2
 from tempest.lib.services.volume import v3
 
-__all__ = ['v1', 'v2', 'v3']
+__all__ = ['v2', 'v3']
diff --git a/tempest/lib/services/volume/base_client.py b/tempest/lib/services/volume/base_client.py
index c7fb21a..c0ac62d 100644
--- a/tempest/lib/services/volume/base_client.py
+++ b/tempest/lib/services/volume/base_client.py
@@ -13,8 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.lib.common import api_version_request
 from tempest.lib.common import api_version_utils
 from tempest.lib.common import rest_client
+from tempest.lib import exceptions
 
 VOLUME_MICROVERSION = None
 
@@ -43,3 +45,39 @@
                 'volume %s' % VOLUME_MICROVERSION,
                 resp)
         return resp, resp_body
+
+    def get_schema(self, schema_versions_info):
+        """Get JSON schema
+
+        This method provides the matching schema for requested
+        microversion.
+
+        :param schema_versions_info: List of dict which provides schema
+                                     information with range of valid versions.
+
+        Example::
+
+         schema_versions_info = [
+             {'min': None, 'max': '2.1', 'schema': schemav21},
+             {'min': '2.2', 'max': '2.9', 'schema': schemav22},
+             {'min': '2.10', 'max': None, 'schema': schemav210}]
+        """
+        schema = None
+        version = api_version_request.APIVersionRequest(VOLUME_MICROVERSION)
+        for items in schema_versions_info:
+            min_version = api_version_request.APIVersionRequest(items['min'])
+            max_version = api_version_request.APIVersionRequest(items['max'])
+            # This is the case where VOLUME_MICROVERSION is None, which means
+            # a request without a microversion, so select the base schema.
+            if version.is_null() and items['min'] is None:
+                schema = items['schema']
+                break
+            # else select the appropriate schema as per VOLUME_MICROVERSION
+            elif version.matches(min_version, max_version):
+                schema = items['schema']
+                break
+        if schema is None:
+            raise exceptions.JSONSchemaNotFound(
+                version=version.get_string(),
+                schema_versions_info=schema_versions_info)
+        return schema
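get_schema() mirrors the compute-side helper: clients declare one schema per contiguous microversion range and validate responses against whichever entry matches the negotiated volume microversion. A sketch of the intended pattern, with placeholder schemas, a hypothetical endpoint and class name:

from tempest.lib.services.volume import base_client

# Placeholder schemas; real clients define full JSON response schemas.
_schema_base = {'status_code': [200]}
_schema_v316 = {'status_code': [200]}


class ExampleVolumeClient(base_client.BaseClient):
    # 'min'/'max' of None mean no lower/upper bound for that range.
    schema_versions_info = [
        {'min': None, 'max': '3.15', 'schema': _schema_base},
        {'min': '3.16', 'max': None, 'schema': _schema_v316},
    ]

    def show_example(self, example_id):
        # 'examples/%s' is a hypothetical endpoint used for illustration.
        resp, body = self.get('examples/%s' % example_id)
        # Pick the schema matching VOLUME_MICROVERSION and validate.
        self.validate_response(
            self.get_schema(self.schema_versions_info), resp, body)
        return resp, body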
diff --git a/tempest/lib/services/volume/v1/__init__.py b/tempest/lib/services/volume/v1/__init__.py
deleted file mode 100644
index 7b5991f..0000000
--- a/tempest/lib/services/volume/v1/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.lib.services.volume.v1.availability_zone_client \
-    import AvailabilityZoneClient
-from tempest.lib.services.volume.v1.backups_client import BackupsClient
-from tempest.lib.services.volume.v1.encryption_types_client import \
-    EncryptionTypesClient
-from tempest.lib.services.volume.v1.extensions_client import ExtensionsClient
-from tempest.lib.services.volume.v1.hosts_client import HostsClient
-from tempest.lib.services.volume.v1.limits_client import LimitsClient
-from tempest.lib.services.volume.v1.qos_client import QosSpecsClient
-from tempest.lib.services.volume.v1.quotas_client import QuotasClient
-from tempest.lib.services.volume.v1.services_client import ServicesClient
-from tempest.lib.services.volume.v1.snapshots_client import SnapshotsClient
-from tempest.lib.services.volume.v1.types_client import TypesClient
-from tempest.lib.services.volume.v1.volumes_client import VolumesClient
-
-__all__ = ['AvailabilityZoneClient', 'BackupsClient', 'EncryptionTypesClient',
-           'ExtensionsClient', 'HostsClient', 'QosSpecsClient', 'QuotasClient',
-           'ServicesClient', 'SnapshotsClient', 'TypesClient', 'VolumesClient',
-           'LimitsClient']
diff --git a/tempest/lib/services/volume/v1/availability_zone_client.py b/tempest/lib/services/volume/v1/availability_zone_client.py
deleted file mode 100644
index be4f539..0000000
--- a/tempest/lib/services/volume/v1/availability_zone_client.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2014 NEC Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-
-
-class AvailabilityZoneClient(rest_client.RestClient):
-    """Volume V1 availability zone client."""
-
-    def list_availability_zones(self):
-        resp, body = self.get('os-availability-zone')
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/backups_client.py b/tempest/lib/services/volume/v1/backups_client.py
deleted file mode 100644
index 2289253..0000000
--- a/tempest/lib/services/volume/v1/backups_client.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class BackupsClient(rest_client.RestClient):
-    """Volume V1 Backups client"""
-    api_version = "v1"
-
-    def create_backup(self, **kwargs):
-        """Creates a backup of volume.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-backup
-        """
-        post_body = json.dumps({'backup': kwargs})
-        resp, body = self.post('backups', post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def restore_backup(self, backup_id, **kwargs):
-        """Restore volume from backup.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#restore-backup
-        """
-        post_body = json.dumps({'restore': kwargs})
-        resp, body = self.post('backups/%s/restore' % (backup_id), post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_backup(self, backup_id):
-        """Delete a backup of volume."""
-        resp, body = self.delete('backups/%s' % backup_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_backup(self, backup_id):
-        """Returns the details of a single backup."""
-        url = "backups/%s" % backup_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_backups(self, detail=False):
-        """Information for all the tenant's backups."""
-        url = "backups"
-        if detail:
-            url += "/detail"
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def export_backup(self, backup_id):
-        """Export backup metadata record."""
-        url = "backups/%s/export_record" % backup_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def import_backup(self, **kwargs):
-        """Import backup metadata record."""
-        # TODO(linanbj): Current api-site doesn't contain this API description.
-        # After fixing the api-site, we need to fix here also for putting the
-        # link to api-site.
-        post_body = json.dumps({'backup-record': kwargs})
-        resp, body = self.post("backups/import_record", post_body)
-        body = json.loads(body)
-        self.expected_success(201, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def reset_backup_status(self, backup_id, status):
-        """Reset the specified backup's status."""
-        post_body = json.dumps({'os-reset_status': {"status": status}})
-        resp, body = self.post('backups/%s/action' % backup_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_backup(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'backup'
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
deleted file mode 100644
index 8e75ff9..0000000
--- a/tempest/lib/services/volume/v1/encryption_types_client.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class EncryptionTypesClient(rest_client.RestClient):
-
-    def is_resource_deleted(self, id):
-        try:
-            body = self.show_encryption_type(id)
-            if not body:
-                return True
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'encryption-type'
-
-    def show_encryption_type(self, volume_type_id):
-        """Get the volume encryption type for the specified volume type.
-
-        :param volume_type_id: Id of volume type.
-        """
-        url = "/types/%s/encryption" % volume_type_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_encryption_type(self, volume_type_id, **kwargs):
-        """Create encryption type.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-an-encryption-type-for-v2
-        """
-        url = "/types/%s/encryption" % volume_type_id
-        post_body = json.dumps({'encryption': kwargs})
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_encryption_type(self, volume_type_id):
-        """Delete the encryption type for the specified volume type."""
-        resp, body = self.delete(
-            "/types/%s/encryption/provider" % volume_type_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/extensions_client.py b/tempest/lib/services/volume/v1/extensions_client.py
deleted file mode 100644
index 7b849a8..0000000
--- a/tempest/lib/services/volume/v1/extensions_client.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-
-
-class ExtensionsClient(rest_client.RestClient):
-    """Volume V1 extensions client."""
-
-    def list_extensions(self):
-        url = 'extensions'
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/hosts_client.py b/tempest/lib/services/volume/v1/hosts_client.py
deleted file mode 100644
index f344678..0000000
--- a/tempest/lib/services/volume/v1/hosts_client.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-
-
-class HostsClient(rest_client.RestClient):
-    """Client class to send CRUD Volume Host API V1 requests"""
-
-    def list_hosts(self, **params):
-        """Lists all hosts.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-all-hosts
-        """
-        url = 'os-hosts'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/qos_client.py b/tempest/lib/services/volume/v1/qos_client.py
deleted file mode 100644
index 67f2ead..0000000
--- a/tempest/lib/services/volume/v1/qos_client.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class QosSpecsClient(rest_client.RestClient):
-    """Volume V1 QoS client.
-
-       Client class to send CRUD QoS API requests
-    """
-
-    api_version = "v1"
-
-    def is_resource_deleted(self, qos_id):
-        try:
-            self.show_qos(qos_id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'qos'
-
-    def create_qos(self, **kwargs):
-        """Create a QoS Specification.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-qos-specification
-        """
-        post_body = json.dumps({'qos_specs': kwargs})
-        resp, body = self.post('qos-specs', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_qos(self, qos_id, force=False):
-        """Delete the specified QoS specification."""
-        resp, body = self.delete(
-            "qos-specs/%s?force=%s" % (qos_id, force))
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_qos(self):
-        """List all the QoS specifications created."""
-        url = 'qos-specs'
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_qos(self, qos_id):
-        """Get the specified QoS specification."""
-        url = "qos-specs/%s" % qos_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def set_qos_key(self, qos_id, **kwargs):
-        """Set the specified keys/values of QoS specification.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#set-keys-in-qos-specification
-        """
-        put_body = json.dumps({"qos_specs": kwargs})
-        resp, body = self.put('qos-specs/%s' % qos_id, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def unset_qos_key(self, qos_id, keys):
-        """Unset the specified keys of QoS specification.
-
-        :param keys: keys to delete from the QoS specification.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#unset-keys-in-qos-specification
-        """
-        put_body = json.dumps({'keys': keys})
-        resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def associate_qos(self, qos_id, vol_type_id):
-        """Associate the specified QoS with specified volume-type."""
-        url = "qos-specs/%s/associate" % qos_id
-        url += "?vol_type_id=%s" % vol_type_id
-        resp, body = self.get(url)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_association_qos(self, qos_id):
-        """Get the association of the specified QoS specification."""
-        url = "qos-specs/%s/associations" % qos_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def disassociate_qos(self, qos_id, vol_type_id):
-        """Disassociate the specified QoS with specified volume-type."""
-        url = "qos-specs/%s/disassociate" % qos_id
-        url += "?vol_type_id=%s" % vol_type_id
-        resp, body = self.get(url)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def disassociate_all_qos(self, qos_id):
-        """Disassociate the specified QoS with all associations."""
-        url = "qos-specs/%s/disassociate_all" % qos_id
-        resp, body = self.get(url)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/quotas_client.py b/tempest/lib/services/volume/v1/quotas_client.py
deleted file mode 100644
index 7f191ca..0000000
--- a/tempest/lib/services/volume/v1/quotas_client.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-
-
-class QuotasClient(rest_client.RestClient):
-    """Client class to send CRUD Volume Quotas API V1 requests"""
-
-    def show_default_quota_set(self, tenant_id):
-        """List the default volume quota set for a tenant."""
-
-        url = 'os-quota-sets/%s/defaults' % tenant_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = jsonutils.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_quota_set(self, tenant_id, params=None):
-        """List the quota set for a tenant."""
-
-        url = 'os-quota-sets/%s' % tenant_id
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = jsonutils.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_quota_set(self, tenant_id, **kwargs):
-        """Updates quota set
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-quotas
-        """
-        put_body = jsonutils.dumps({'quota_set': kwargs})
-        resp, body = self.put('os-quota-sets/%s' % tenant_id, put_body)
-        self.expected_success(200, resp.status)
-        body = jsonutils.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_quota_set(self, tenant_id):
-        """Delete the tenant's quota set."""
-        resp, body = self.delete('os-quota-sets/%s' % tenant_id)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/services_client.py b/tempest/lib/services/volume/v1/services_client.py
deleted file mode 100644
index d438a34..0000000
--- a/tempest/lib/services/volume/v1/services_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2014 NEC Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-
-
-class ServicesClient(rest_client.RestClient):
-    """Volume V1 volume services client"""
-
-    def list_services(self, **params):
-        url = 'os-services'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/snapshots_client.py b/tempest/lib/services/volume/v1/snapshots_client.py
deleted file mode 100644
index 7dfdcf2..0000000
--- a/tempest/lib/services/volume/v1/snapshots_client.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class SnapshotsClient(rest_client.RestClient):
-    """Client class to send CRUD Volume V1 API requests."""
-
-    create_resp = 200
-
-    def list_snapshots(self, detail=False, **params):
-        """List all the snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-snapshots
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-snapshots-with-details
-        """
-        url = 'snapshots'
-        if detail:
-            url += '/detail'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_snapshot(self, snapshot_id):
-        """Returns the details of a single snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#show-snapshot-details
-        """
-        url = "snapshots/%s" % snapshot_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_snapshot(self, **kwargs):
-        """Creates a new snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-snapshot
-        """
-        post_body = json.dumps({'snapshot': kwargs})
-        resp, body = self.post('snapshots', post_body)
-        body = json.loads(body)
-        self.expected_success(self.create_resp, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_snapshot(self, snapshot_id):
-        """Delete Snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#delete-snapshot
-        """
-        resp, body = self.delete("snapshots/%s" % snapshot_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_snapshot(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'volume-snapshot'
-
-    def reset_snapshot_status(self, snapshot_id, status):
-        """Reset the specified snapshot's status."""
-        post_body = json.dumps({'os-reset_status': {"status": status}})
-        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_snapshot_status(self, snapshot_id, **kwargs):
-        """Update the specified snapshot's status."""
-        # TODO(gmann): api-site doesn't contain doc ref
-        # for this API. After fixing the api-site, we need to
-        # add the link here.
-        # Bug https://bugs.launchpad.net/openstack-api-site/+bug/1532645
-
-        post_body = json.dumps({'os-update_snapshot_status': kwargs})
-        url = 'snapshots/%s/action' % snapshot_id
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_snapshot_metadata(self, snapshot_id, metadata):
-        """Create metadata for the snapshot."""
-        put_body = json.dumps({'metadata': metadata})
-        url = "snapshots/%s/metadata" % snapshot_id
-        resp, body = self.post(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_snapshot(self, snapshot_id, **kwargs):
-        """Updates a snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-snapshot
-        """
-        put_body = json.dumps({'snapshot': kwargs})
-        resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_snapshot_metadata(self, snapshot_id):
-        """Get metadata of the snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#show-snapshot-metadata
-        """
-        url = "snapshots/%s/metadata" % snapshot_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_snapshot_metadata(self, snapshot_id, **kwargs):
-        """Update metadata for the snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-snapshot-metadata
-        """
-        put_body = json.dumps(kwargs)
-        url = "snapshots/%s/metadata" % snapshot_id
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
-        """Update metadata item for the snapshot."""
-        # TODO(piyush): Current api-site doesn't contain this API description.
-        # After fixing the api-site, we need to fix here also for putting the
-        # link to api-site.
-        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529064
-        put_body = json.dumps(kwargs)
-        url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_snapshot_metadata_item(self, snapshot_id, id):
-        """Delete metadata item for the snapshot."""
-        url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
-        resp, body = self.delete(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def force_delete_snapshot(self, snapshot_id):
-        """Force Delete Snapshot."""
-        post_body = json.dumps({'os-force_delete': {}})
-        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/types_client.py b/tempest/lib/services/volume/v1/types_client.py
deleted file mode 100644
index d434e65..0000000
--- a/tempest/lib/services/volume/v1/types_client.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class TypesClient(rest_client.RestClient):
-    """Client class to send CRUD Volume Types API requests"""
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_volume_type(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'volume-type'
-
-    def list_volume_types(self, **params):
-        """List all the volume_types created.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-all-volume-types-for-v2
-        """
-        url = 'types'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume_type(self, volume_type_id):
-        """Returns the details of a single volume_type.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#show-volume-type-details-for-v2
-        """
-        url = "types/%s" % volume_type_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume_type(self, **kwargs):
-        """Create volume type.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-volume-type-for-v2
-        """
-        post_body = json.dumps({'volume_type': kwargs})
-        resp, body = self.post('types', post_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume_type(self, volume_type_id):
-        """Deletes the Specified Volume_type.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#delete-volume-type
-        """
-        resp, body = self.delete("types/%s" % volume_type_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_volume_types_extra_specs(self, volume_type_id, **params):
-        """List all the volume_types extra specs created.
-
-        TODO: Current api-site doesn't contain this API description.
-        After fixing the api-site, we need to fix here also for putting
-        the link to api-site.
-        """
-        url = 'types/%s/extra_specs' % volume_type_id
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume_type_extra_specs(self, volume_type_id, extra_specs_name):
-        """Returns the details of a single volume_type extra spec."""
-        url = "types/%s/extra_specs/%s" % (volume_type_id, extra_specs_name)
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume_type_extra_specs(self, volume_type_id, extra_specs):
-        """Creates a new Volume_type extra spec.
-
-        volume_type_id: Id of volume_type.
-        extra_specs: A dictionary of values to be used as extra_specs.
-        """
-        url = "types/%s/extra_specs" % volume_type_id
-        post_body = json.dumps({'extra_specs': extra_specs})
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume_type_extra_specs(self, volume_type_id, extra_spec_name):
-        """Deletes the Specified Volume_type extra spec."""
-        resp, body = self.delete("types/%s/extra_specs/%s" % (
-            volume_type_id, extra_spec_name))
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_type(self, volume_type_id, **kwargs):
-        """Updates volume type name, description, and/or is_public.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-volume-type
-        """
-        put_body = json.dumps({'volume_type': kwargs})
-        resp, body = self.put('types/%s' % volume_type_id, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_type_extra_specs(self, volume_type_id, extra_spec_name,
-                                       extra_specs):
-        """Update a volume_type extra spec.
-
-        :param volume_type_id: Id of volume_type.
-        :param extra_spec_name: Name of the extra spec to be updated.
-        :param extra_specs: A dictionary of with key as extra_spec_name and the
-                            updated value.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-extra-specs-for-a-volume-type
-        """
-        url = "types/%s/extra_specs/%s" % (volume_type_id, extra_spec_name)
-        put_body = json.dumps(extra_specs)
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
deleted file mode 100644
index 4ed5eb1..0000000
--- a/tempest/lib/services/volume/v1/volumes_client.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-import six
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class VolumesClient(rest_client.RestClient):
-    """Client class to send CRUD Volume V1 API requests"""
-
-    def _prepare_params(self, params):
-        """Prepares params for use in get or _ext_get methods.
-
-        If params is a string it will be left as it is, but if it's not it will
-        be urlencoded.
-        """
-        if isinstance(params, six.string_types):
-            return params
-        return urllib.urlencode(params)
-
-    def list_volumes(self, detail=False, params=None):
-        """List all the volumes created.
-
-        Params can be a string (must be urlencoded) or a dictionary.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-volumes
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-volumes-with-details
-        """
-        url = 'volumes'
-        if detail:
-            url += '/detail'
-        if params:
-            url += '?%s' % self._prepare_params(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume(self, volume_id):
-        """Returns the details of a single volume."""
-        url = "volumes/%s" % volume_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume(self, **kwargs):
-        """Creates a new Volume.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-volume
-        """
-        post_body = json.dumps({'volume': kwargs})
-        resp, body = self.post('volumes', post_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume(self, volume_id, **kwargs):
-        """Updates the Specified Volume.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-volume
-        """
-        put_body = json.dumps({'volume': kwargs})
-        resp, body = self.put('volumes/%s' % volume_id, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume(self, volume_id):
-        """Deletes the Specified Volume."""
-        resp, body = self.delete("volumes/%s" % volume_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def upload_volume(self, volume_id, **kwargs):
-        """Uploads a volume in Glance."""
-        post_body = json.dumps({'os-volume_upload_image': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def attach_volume(self, volume_id, **kwargs):
-        """Attaches a volume to a given instance on a given mountpoint.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#attach-volume-to-server
-        """
-        post_body = json.dumps({'os-attach': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def set_bootable_volume(self, volume_id, **kwargs):
-        """set a bootable flag for a volume - true or false."""
-        post_body = json.dumps({'os-set_bootable': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def detach_volume(self, volume_id):
-        """Detaches a volume from an instance."""
-        post_body = json.dumps({'os-detach': {}})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def reserve_volume(self, volume_id):
-        """Reserves a volume."""
-        post_body = json.dumps({'os-reserve': {}})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def unreserve_volume(self, volume_id):
-        """Restore a reserved volume ."""
-        post_body = json.dumps({'os-unreserve': {}})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_volume(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'volume'
-
-    def extend_volume(self, volume_id, **kwargs):
-        """Extend a volume.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#extend-volume-size
-        """
-        post_body = json.dumps({'os-extend': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def reset_volume_status(self, volume_id, **kwargs):
-        """Reset the Specified Volume's Status.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#reset-volume-statuses
-        """
-        post_body = json.dumps({'os-reset_status': kwargs})
-        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume_transfer(self, **kwargs):
-        """Create a volume transfer.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-volume-transfer
-        """
-        post_body = json.dumps({'transfer': kwargs})
-        resp, body = self.post('os-volume-transfer', post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume_transfer(self, transfer_id):
-        """Returns the details of a volume transfer."""
-        url = "os-volume-transfer/%s" % transfer_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_volume_transfers(self, **params):
-        """List all the volume transfers created.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-volume-transfers
-        """
-        url = 'os-volume-transfer'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume_transfer(self, transfer_id):
-        """Delete a volume transfer."""
-        resp, body = self.delete("os-volume-transfer/%s" % transfer_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def accept_volume_transfer(self, transfer_id, **kwargs):
-        """Accept a volume transfer.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#accept-volume-transfer
-        """
-        url = 'os-volume-transfer/%s/accept' % transfer_id
-        post_body = json.dumps({'accept': kwargs})
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_readonly(self, volume_id, **kwargs):
-        """Update the Specified Volume readonly."""
-        post_body = json.dumps({'os-update_readonly_flag': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def force_delete_volume(self, volume_id):
-        """Force Delete Volume."""
-        post_body = json.dumps({'os-force_delete': {}})
-        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume_metadata(self, volume_id, metadata):
-        """Create metadata for the volume."""
-        put_body = json.dumps({'metadata': metadata})
-        url = "volumes/%s/metadata" % volume_id
-        resp, body = self.post(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume_metadata(self, volume_id):
-        """Get metadata of the volume."""
-        url = "volumes/%s/metadata" % volume_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_metadata(self, volume_id, metadata):
-        """Update metadata for the volume."""
-        put_body = json.dumps({'metadata': metadata})
-        url = "volumes/%s/metadata" % volume_id
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_metadata_item(self, volume_id, id, meta_item):
-        """Update metadata item for the volume."""
-        put_body = json.dumps({'meta': meta_item})
-        url = "volumes/%s/metadata/%s" % (volume_id, id)
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume_metadata_item(self, volume_id, id):
-        """Delete metadata item for the volume."""
-        url = "volumes/%s/metadata/%s" % (volume_id, id)
-        resp, body = self.delete(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def retype_volume(self, volume_id, **kwargs):
-        """Updates volume with new volume type."""
-        post_body = json.dumps({'os-retype': kwargs})
-        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
diff --git a/tempest/lib/services/volume/v2/__init__.py b/tempest/lib/services/volume/v2/__init__.py
index 68982d9..756a41a 100644
--- a/tempest/lib/services/volume/v2/__init__.py
+++ b/tempest/lib/services/volume/v2/__init__.py
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+import warnings
+
 from tempest.lib.services.volume.v2.availability_zone_client \
     import AvailabilityZoneClient
 from tempest.lib.services.volume.v2.backups_client import BackupsClient
@@ -44,3 +46,9 @@
            'LimitsClient', 'CapabilitiesClient', 'SchedulerStatsClient',
            'SnapshotManageClient', 'VolumeManageClient', 'TransfersClient',
            'QuotaClassesClient']
+
+warnings.warn(
+    "The tempest.lib.services.volume.v2 module (volume v2 APIs service "
+    "clients) is deprecated in favor of tempest.lib.services.volume.v3 "
+    "(volume v3 APIs service clients) and will be removed once Tempest stops "
+    "supporting stable wallaby.", DeprecationWarning)
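Reviewer note: Python silences DeprecationWarning by default outside of __main__, so callers only see the message above if a warnings filter is enabled. A minimal sketch (not part of the change) of how a consumer could surface it; it assumes a fresh interpreter, since a module-level warning fires only on the first import.

import warnings

# Surface the module-level DeprecationWarning emitted by the v2 package.
# It fires only the first time the module is imported in a process.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always", DeprecationWarning)
    from tempest.lib.services.volume import v2  # noqa: F401

for warning in caught:
    print(warning.category.__name__, warning.message)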
diff --git a/tempest/lib/services/volume/v3/__init__.py b/tempest/lib/services/volume/v3/__init__.py
index a1b7de3..039640b 100644
--- a/tempest/lib/services/volume/v3/__init__.py
+++ b/tempest/lib/services/volume/v3/__init__.py
@@ -11,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations under
 # the License.
+from tempest.lib.services.volume.v3.attachments_client import AttachmentsClient
 from tempest.lib.services.volume.v3.availability_zone_client \
     import AvailabilityZoneClient
 from tempest.lib.services.volume.v3.backups_client import BackupsClient
@@ -38,17 +39,18 @@
     SnapshotManageClient
 from tempest.lib.services.volume.v3.snapshots_client import SnapshotsClient
 from tempest.lib.services.volume.v3.transfers_client import TransfersClient
+from tempest.lib.services.volume.v3.transfers_client import TransfersV355Client
 from tempest.lib.services.volume.v3.types_client import TypesClient
 from tempest.lib.services.volume.v3.versions_client import VersionsClient
 from tempest.lib.services.volume.v3.volume_manage_client import \
     VolumeManageClient
 from tempest.lib.services.volume.v3.volumes_client import VolumesClient
-
-__all__ = ['AvailabilityZoneClient', 'BackupsClient', 'BaseClient',
-           'CapabilitiesClient', 'EncryptionTypesClient', 'ExtensionsClient',
-           'GroupSnapshotsClient', 'GroupTypesClient', 'GroupsClient',
-           'HostsClient', 'LimitsClient', 'MessagesClient', 'QosSpecsClient',
-           'QuotaClassesClient', 'QuotasClient', 'SchedulerStatsClient',
-           'ServicesClient', 'SnapshotManageClient', 'SnapshotsClient',
-           'TransfersClient', 'TypesClient', 'VersionsClient',
-           'VolumeManageClient', 'VolumesClient']
+__all__ = ['AttachmentsClient', 'AvailabilityZoneClient', 'BackupsClient',
+           'BaseClient', 'CapabilitiesClient', 'EncryptionTypesClient',
+           'ExtensionsClient', 'GroupSnapshotsClient', 'GroupTypesClient',
+           'GroupsClient', 'HostsClient', 'LimitsClient', 'MessagesClient',
+           'QosSpecsClient', 'QuotaClassesClient', 'QuotasClient',
+           'SchedulerStatsClient', 'ServicesClient', 'SnapshotManageClient',
+           'SnapshotsClient', 'TransfersClient', 'TransfersV355Client',
+           'TypesClient', 'VersionsClient', 'VolumeManageClient',
+           'VolumesClient']
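The reworked __all__ now also exports the new AttachmentsClient and TransfersV355Client; a quick illustrative check (not part of the change) that both are reachable from the package root:

from tempest.lib.services.volume import v3

# Both names below are taken verbatim from the updated __all__ above.
assert "AttachmentsClient" in v3.__all__
assert "TransfersV355Client" in v3.__all__
clients = {name: getattr(v3, name) for name in v3.__all__}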
diff --git a/tempest/lib/services/volume/v1/limits_client.py b/tempest/lib/services/volume/v3/attachments_client.py
similarity index 73%
rename from tempest/lib/services/volume/v1/limits_client.py
rename to tempest/lib/services/volume/v3/attachments_client.py
index e14b2dc..5e448f7 100644
--- a/tempest/lib/services/volume/v1/limits_client.py
+++ b/tempest/lib/services/volume/v3/attachments_client.py
@@ -1,6 +1,3 @@
-# Copyright 2016 Red Hat, Inc.
-# All Rights Reserved.
-#
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
@@ -16,16 +13,15 @@
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
+from tempest.lib.services.volume import base_client
 
 
-class LimitsClient(rest_client.RestClient):
-    """Volume V1 limits client."""
+class AttachmentsClient(base_client.BaseClient):
+    """Client class to send CRUD attachment V3 API requests"""
 
-    api_version = "v1"
-
-    def show_limits(self):
-        """Returns the details of a volume absolute limits."""
-        url = "limits"
+    def show_attachment(self, attachment_id):
+        """Show volume attachment."""
+        url = "attachments/%s" % (attachment_id)
         resp, body = self.get(url)
         body = json.loads(body)
         self.expected_success(200, resp.status)
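Usage sketch for the renamed client (illustrative, not part of the change): `attachments_client` is assumed to be an AttachmentsClient already built with a valid auth provider, e.g. by Tempest's client manager, and the field names in the comment are examples rather than a complete response schema.

def describe_attachment(attachments_client, attachment_id):
    """Sketch: fetch one attachment record via the new v3 client."""
    body = attachments_client.show_attachment(attachment_id)
    # The Block Storage v3 API wraps the record in an 'attachment' key;
    # 'status' and 'volume_id' are shown purely for illustration.
    attachment = body['attachment']
    return attachment.get('status'), attachment.get('volume_id')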
diff --git a/tempest/lib/services/volume/v3/backups_client.py b/tempest/lib/services/volume/v3/backups_client.py
index 970471e..4bf7ffb 100644
--- a/tempest/lib/services/volume/v3/backups_client.py
+++ b/tempest/lib/services/volume/v3/backups_client.py
@@ -13,9 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.api_schema.response.volume import backups as schema
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 from tempest.lib.services.volume import base_client
@@ -34,7 +36,7 @@
         post_body = json.dumps({'backup': kwargs})
         resp, body = self.post('backups', post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.create_backup, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_backup(self, backup_id, **kwargs):
@@ -47,7 +49,7 @@
         put_body = json.dumps({'backup': kwargs})
         resp, body = self.put('backups/%s' % backup_id, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_backup, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def restore_backup(self, backup_id, **kwargs):
@@ -60,13 +62,13 @@
         post_body = json.dumps({'restore': kwargs})
         resp, body = self.post('backups/%s/restore' % (backup_id), post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.restore_backup, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_backup(self, backup_id):
         """Delete a backup of volume."""
         resp, body = self.delete('backups/%s' % backup_id)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.delete_backup, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_backup(self, backup_id):
@@ -74,7 +76,7 @@
         url = "backups/%s" % backup_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_backup, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def list_backups(self, detail=False, **params):
@@ -86,13 +88,15 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-backups-with-detail
         """
         url = "backups"
+        list_backups_schema = schema.list_backups_no_detail
         if detail:
             url += "/detail"
+            list_backups_schema = schema.list_backups_with_detail
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(list_backups_schema, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def export_backup(self, backup_id):
@@ -100,7 +104,7 @@
         url = "backups/%s/export_record" % backup_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.export_backup, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def import_backup(self, **kwargs):
@@ -113,14 +117,14 @@
         post_body = json.dumps({'backup-record': kwargs})
         resp, body = self.post("backups/import_record", post_body)
         body = json.loads(body)
-        self.expected_success(201, resp.status)
+        self.validate_response(schema.import_backup, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def reset_backup_status(self, backup_id, status):
         """Reset the specified backup's status."""
         post_body = json.dumps({'os-reset_status': {"status": status}})
         resp, body = self.post('backups/%s/action' % backup_id, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.reset_backup_status, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def is_resource_deleted(self, id):
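The recurring change in this and the following client hunks is swapping self.expected_success(<code>, resp.status) for self.validate_response(<schema>, resp, body), so the status code and the body structure are both checked against the schema modules under tempest.lib.api_schema.response.volume (with a separate schema picked when detail=True). A standalone sketch of that idea using the jsonschema library; the schema dict below is illustrative, not the real definition from the backups schema module.

import jsonschema

# Illustrative schema in the same shape Tempest uses: an allowed status-code
# list plus a JSON Schema for the response body.
create_backup = {
    'status_code': [202],
    'response_body': {
        'type': 'object',
        'properties': {
            'backup': {
                'type': 'object',
                'properties': {
                    'id': {'type': 'string'},
                    'name': {'type': ['string', 'null']},
                },
                'required': ['id'],
            },
        },
        'required': ['backup'],
    },
}


def validate_response(schema, status, body):
    """Check both the HTTP status and the JSON body against a schema dict."""
    if status not in schema['status_code']:
        raise AssertionError('unexpected status %s' % status)
    jsonschema.validate(body, schema['response_body'])


validate_response(create_backup, 202, {'backup': {'id': 'abc', 'name': None}})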
diff --git a/tempest/lib/services/volume/v3/encryption_types_client.py b/tempest/lib/services/volume/v3/encryption_types_client.py
index bd809d6..7cced57 100644
--- a/tempest/lib/services/volume/v3/encryption_types_client.py
+++ b/tempest/lib/services/volume/v3/encryption_types_client.py
@@ -15,6 +15,7 @@
 
 from oslo_serialization import jsonutils as json
 
+from tempest.lib.api_schema.response.volume import encryption_types as schema
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 
@@ -43,7 +44,7 @@
         url = "/types/%s/encryption" % volume_type_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_encryption_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_encryption_specs_item(self, volume_type_id, key):
@@ -51,7 +52,7 @@
         url = "/types/%s/encryption/%s" % (volume_type_id, key)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_encryption_specs_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def create_encryption_type(self, volume_type_id, **kwargs):
@@ -65,14 +66,14 @@
         post_body = json.dumps({'encryption': kwargs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.create_encryption_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_encryption_type(self, volume_type_id):
         """Delete the encryption type for the specified volume-type."""
         resp, body = self.delete(
             "/types/%s/encryption/provider" % volume_type_id)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.delete_encryption_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_encryption_type(self, volume_type_id, **kwargs):
@@ -86,5 +87,5 @@
         put_body = json.dumps({'encryption': kwargs})
         resp, body = self.put(url, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_encryption_type, resp, body)
         return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/group_snapshots_client.py b/tempest/lib/services/volume/v3/group_snapshots_client.py
index e425a3f..0f36fc9 100644
--- a/tempest/lib/services/volume/v3/group_snapshots_client.py
+++ b/tempest/lib/services/volume/v3/group_snapshots_client.py
@@ -13,9 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.api_schema.response.volume import group_snapshots as schema
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 from tempest.lib.services.volume import base_client
@@ -34,7 +36,7 @@
         post_body = json.dumps({'group_snapshot': kwargs})
         resp, body = self.post('group_snapshots', post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.create_group_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_group_snapshot(self, group_snapshot_id):
@@ -44,7 +46,7 @@
         https://docs.openstack.org/api-ref/block-storage/v3/#delete-group-snapshot
         """
         resp, body = self.delete('group_snapshots/%s' % group_snapshot_id)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.delete_group_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_group_snapshot(self, group_snapshot_id):
@@ -56,7 +58,7 @@
         url = "group_snapshots/%s" % str(group_snapshot_id)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_group_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def list_group_snapshots(self, detail=False, **params):
@@ -67,13 +69,15 @@
         https://docs.openstack.org/api-ref/block-storage/v3/#list-group-snapshots-with-details
         """
         url = "group_snapshots"
+        list_group_snapshots = schema.list_group_snapshots_no_detail
         if detail:
             url += "/detail"
+            list_group_snapshots = schema.list_group_snapshots_with_detail
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(list_group_snapshots, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def reset_group_snapshot_status(self, group_snapshot_id, status_to_set):
@@ -85,7 +89,7 @@
         post_body = json.dumps({'reset_status': {'status': status_to_set}})
         resp, body = self.post('group_snapshots/%s/action' % group_snapshot_id,
                                post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.reset_group_snapshot_status, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def is_resource_deleted(self, id):
diff --git a/tempest/lib/services/volume/v3/group_types_client.py b/tempest/lib/services/volume/v3/group_types_client.py
index 99833ce..9de36f4 100644
--- a/tempest/lib/services/volume/v3/group_types_client.py
+++ b/tempest/lib/services/volume/v3/group_types_client.py
@@ -13,9 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.api_schema.response.volume import group_types as schema
 from tempest.lib.common import rest_client
 from tempest.lib.services.volume import base_client
 
@@ -38,13 +40,13 @@
         post_body = json.dumps({'group_type': kwargs})
         resp, body = self.post('group_types', post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.create_group_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_group_type(self, group_type_id):
         """Deletes the specified group_type."""
         resp, body = self.delete("group_types/%s" % group_type_id)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.delete_group_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def list_group_types(self, **params):
@@ -60,7 +62,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.list_group_types, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_default_group_type(self):
@@ -72,7 +74,7 @@
         url = 'group_types/default'
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_default_group_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_group_type(self, group_type_id):
@@ -84,7 +86,7 @@
         url = "group_types/%s" % group_type_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_group_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_group_type(self, group_type_id, **kwargs):
@@ -96,8 +98,8 @@
         """
         post_body = json.dumps({'group_type': kwargs})
         resp, body = self.put('group_types/%s' % group_type_id, post_body)
-        self.expected_success(200, resp.status)
         body = json.loads(body)
+        self.validate_response(schema.update_group_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def create_or_update_group_type_specs(self, group_type_id, group_specs):
@@ -111,7 +113,8 @@
         post_body = json.dumps({'group_specs': group_specs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(
+            schema.create_or_update_group_type_specs, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def list_group_type_specs(self, group_type_id):
@@ -119,7 +122,7 @@
         url = 'group_types/%s/group_specs' % group_type_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.list_group_type_specs, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_group_type_specs_item(self, group_type_id, spec_id):
@@ -127,7 +130,7 @@
         url = "group_types/%s/group_specs/%s" % (group_type_id, spec_id)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_group_type_specs_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_group_type_specs_item(self, group_type_id, spec_id, spec):
@@ -141,12 +144,12 @@
         put_body = json.dumps(spec)
         resp, body = self.put(url, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_group_type_specs_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_group_type_specs_item(self, group_type_id, spec_id):
         """Deletes specified item of group specs for a given group type."""
         resp, body = self.delete("group_types/%s/group_specs/%s" % (
             group_type_id, spec_id))
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.delete_group_type_specs_item, resp, body)
         return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/groups_client.py b/tempest/lib/services/volume/v3/groups_client.py
index ffae232..d1500cf 100644
--- a/tempest/lib/services/volume/v3/groups_client.py
+++ b/tempest/lib/services/volume/v3/groups_client.py
@@ -13,9 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.api_schema.response.volume import groups as schema
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 from tempest.lib.services.volume import base_client
@@ -23,6 +25,7 @@
 
 class GroupsClient(base_client.BaseClient):
     """Client class to send CRUD Volume Group API requests"""
+    api_version = 'v3'
 
     def create_group(self, **kwargs):
         """Creates a group.
@@ -35,7 +38,7 @@
         post_body = json.dumps({'group': kwargs})
         resp, body = self.post('groups', post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.create_group, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_group(self, group_id, delete_volumes=True):
@@ -49,7 +52,7 @@
         post_body = json.dumps({'delete': post_body})
         resp, body = self.post('groups/%s/action' % group_id,
                                post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.delete_group, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_group(self, group_id):
@@ -62,7 +65,7 @@
         url = "groups/%s" % str(group_id)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_group, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def list_groups(self, detail=False, **params):
@@ -74,13 +77,15 @@
         https://docs.openstack.org/api-ref/block-storage/v3/#list-groups-with-details
         """
         url = "groups"
+        schema_list_groups = schema.list_groups_no_detail
         if detail:
             url += "/detail"
+            schema_list_groups = schema.list_groups_with_detail
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema_list_groups, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def create_group_from_source(self, **kwargs):
@@ -93,7 +98,7 @@
         post_body = json.dumps({'create-from-src': kwargs})
         resp, body = self.post('groups/action', post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.create_group_from_source, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_group(self, group_id, **kwargs):
@@ -105,7 +110,7 @@
         """
         put_body = json.dumps({'group': kwargs})
         resp, body = self.put('groups/%s' % group_id, put_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.update_group, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def reset_group_status(self, group_id, status_to_set):
@@ -116,7 +121,7 @@
         """
         post_body = json.dumps({'reset_status': {'status': status_to_set}})
         resp, body = self.post('groups/%s/action' % group_id, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.reset_group_status, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def is_resource_deleted(self, id):
diff --git a/tempest/lib/services/volume/v3/hosts_client.py b/tempest/lib/services/volume/v3/hosts_client.py
index 019a852..9c64659 100644
--- a/tempest/lib/services/volume/v3/hosts_client.py
+++ b/tempest/lib/services/volume/v3/hosts_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import hosts as schema
 from tempest.lib.common import rest_client
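This hunk, like several below, only swaps the six.moves alias for the Python 3 standard library; the call sites are unchanged, as the short check below illustrates (parameter names are examples only).

from urllib import parse as urllib

# Same helper the clients use to build query strings; six.moves.urllib.parse
# was only ever an alias for this module on Python 3.
params = {'binary': 'cinder-volume', 'host': 'controller'}
print(urllib.urlencode(params))  # binary=cinder-volume&host=controller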
diff --git a/tempest/lib/services/volume/v3/limits_client.py b/tempest/lib/services/volume/v3/limits_client.py
index 9500254..a8d1377 100644
--- a/tempest/lib/services/volume/v3/limits_client.py
+++ b/tempest/lib/services/volume/v3/limits_client.py
@@ -15,6 +15,7 @@
 
 from oslo_serialization import jsonutils as json
 
+from tempest.lib.api_schema.response.volume import limits as schema
 from tempest.lib.common import rest_client
 
 
@@ -26,5 +27,5 @@
         url = "limits"
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_limits, resp, body)
         return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/quotas_client.py b/tempest/lib/services/volume/v3/quotas_client.py
index 5b1a52c..3f4c4e1 100644
--- a/tempest/lib/services/volume/v3/quotas_client.py
+++ b/tempest/lib/services/volume/v3/quotas_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import quotas as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/volume/v3/scheduler_stats_client.py b/tempest/lib/services/volume/v3/scheduler_stats_client.py
index 2ae8600..e18980d 100644
--- a/tempest/lib/services/volume/v3/scheduler_stats_client.py
+++ b/tempest/lib/services/volume/v3/scheduler_stats_client.py
@@ -15,6 +15,7 @@
 
 from oslo_serialization import jsonutils as json
 
+from tempest.lib.api_schema.response.volume import scheduler_stats as schema
 from tempest.lib.common import rest_client
 
 
@@ -28,9 +29,11 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-all-back-end-storage-pools
         """
         url = 'scheduler-stats/get_pools'
+        schema_get_pools = schema.get_pools_no_detail
         if detail:
             url += '?detail=True'
+            schema_get_pools = schema.get_pools_with_detail
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema_get_pools, resp, body)
         return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/services_client.py b/tempest/lib/services/volume/v3/services_client.py
index 8bc82c9..1111f81 100644
--- a/tempest/lib/services/volume/v3/services_client.py
+++ b/tempest/lib/services/volume/v3/services_client.py
@@ -13,16 +13,23 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import services as schema
+from tempest.lib.api_schema.response.volume.v3_7 import services as schemav37
 from tempest.lib.common import rest_client
+from tempest.lib.services.volume import base_client
 
 
-class ServicesClient(rest_client.RestClient):
+class ServicesClient(base_client.BaseClient):
     """Client class to send CRUD Volume Services API requests"""
 
+    schema_versions_info = [
+        {'min': None, 'max': '3.6', 'schema': schema},
+        {'min': '3.7', 'max': None, 'schema': schemav37}]
+
     def list_services(self, **params):
         """List all Cinder services.
 
@@ -36,6 +43,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.list_services, resp, body)
         return rest_client.ResponseBody(resp, body)
 
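Switching ServicesClient to base_client.BaseClient lets it resolve the response schema from schema_versions_info according to the volume microversion in use: the base services schema up to 3.6, the v3_7 variant from 3.7 on. The standalone sketch below shows the idea of that lookup only; the actual resolution is done by the BaseClient/api_version_utils helpers, and the entries here are stand-in strings.

def _to_tuple(mv):
    # Compare microversions numerically so that, e.g., 3.10 > 3.9,
    # which naive string comparison would get wrong.
    return tuple(int(part) for part in mv.split('.'))


def get_schema(schema_versions_info, requested_mv):
    """Return the entry whose [min, max] range contains requested_mv."""
    req = _to_tuple(requested_mv)
    for entry in schema_versions_info:
        low = _to_tuple(entry['min']) if entry['min'] else None
        high = _to_tuple(entry['max']) if entry['max'] else None
        if (low is None or low <= req) and (high is None or req <= high):
            return entry['schema']
    raise ValueError('no schema for microversion %s' % requested_mv)


# With a table shaped like the one in the diff, 3.6 resolves to the base
# schema and anything from 3.7 on to the v3_7 variant.
table = [{'min': None, 'max': '3.6', 'schema': 'base'},
         {'min': '3.7', 'max': None, 'schema': 'v3_7'}]
assert get_schema(table, '3.6') == 'base'
assert get_schema(table, '3.55') == 'v3_7'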
diff --git a/tempest/lib/services/volume/v3/snapshots_client.py b/tempest/lib/services/volume/v3/snapshots_client.py
index 264381d..ae31ee9 100644
--- a/tempest/lib/services/volume/v3/snapshots_client.py
+++ b/tempest/lib/services/volume/v3/snapshots_client.py
@@ -13,9 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.api_schema.response.volume import snapshots as schema
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 
@@ -32,14 +34,16 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-snapshots-and-details
         """
         url = 'snapshots'
+        list_schema = schema.list_snapshots_no_detail
         if detail:
             url += '/detail'
+            list_schema = schema.list_snapshots_with_detail
         if params:
             url += '?%s' % urllib.urlencode(params)
 
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(list_schema, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_snapshot(self, snapshot_id):
@@ -52,7 +56,7 @@
         url = "snapshots/%s" % snapshot_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def create_snapshot(self, **kwargs):
@@ -65,7 +69,7 @@
         post_body = json.dumps({'snapshot': kwargs})
         resp, body = self.post('snapshots', post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.create_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_snapshot(self, snapshot_id, **kwargs):
@@ -78,7 +82,7 @@
         put_body = json.dumps({'snapshot': kwargs})
         resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_snapshot(self, snapshot_id):
@@ -89,7 +93,7 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#delete-a-snapshot
         """
         resp, body = self.delete("snapshots/%s" % snapshot_id)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.delete_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def is_resource_deleted(self, id):
@@ -108,7 +112,7 @@
         """Reset the specified snapshot's status."""
         post_body = json.dumps({'os-reset_status': {"status": status}})
         resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.reset_snapshot_status, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_snapshot_status(self, snapshot_id, **kwargs):
@@ -121,7 +125,7 @@
         post_body = json.dumps({'os-update_snapshot_status': kwargs})
         url = 'snapshots/%s/action' % snapshot_id
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.update_snapshot_status, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def create_snapshot_metadata(self, snapshot_id, metadata):
@@ -135,7 +139,7 @@
         url = "snapshots/%s/metadata" % snapshot_id
         resp, body = self.post(url, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.create_snapshot_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_snapshot_metadata(self, snapshot_id):
@@ -148,7 +152,7 @@
         url = "snapshots/%s/metadata" % snapshot_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_snapshot_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_snapshot_metadata(self, snapshot_id, **kwargs):
@@ -162,7 +166,7 @@
         url = "snapshots/%s/metadata" % snapshot_id
         resp, body = self.put(url, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_snapshot_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_snapshot_metadata_item(self, snapshot_id, id):
@@ -170,7 +174,7 @@
         url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_snapshot_metadata_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
@@ -184,21 +188,23 @@
         url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
         resp, body = self.put(url, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(
+            schema.update_snapshot_metadata_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_snapshot_metadata_item(self, snapshot_id, id):
         """Delete metadata item for the snapshot."""
         url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
         resp, body = self.delete(url)
-        self.expected_success(200, resp.status)
+        self.validate_response(
+            schema.delete_snapshot_metadata_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def force_delete_snapshot(self, snapshot_id):
         """Force Delete Snapshot."""
         post_body = json.dumps({'os-force_delete': {}})
         resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.force_delete_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def unmanage_snapshot(self, snapshot_id):
@@ -206,5 +212,5 @@
         post_body = json.dumps({'os-unmanage': {}})
         url = 'snapshots/%s/action' % (snapshot_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.unmanage_snapshot, resp, body)
         return rest_client.ResponseBody(resp, body)
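The same validate_response pattern also covers the DELETE and action calls above (delete_snapshot, force_delete_snapshot, unmanage_snapshot and so on). For those, the schema typically pins only the expected status code (e.g. 202) and defines no response body, so validation reduces to a status check. A hedged extension of the earlier sketch, with an illustrative schema:

import jsonschema


def validate_response(schema, status, body):
    """Status check plus optional body validation, as in the earlier sketch."""
    if status not in schema['status_code']:
        raise AssertionError('Unexpected status code %s' % status)
    if 'response_body' in schema:
        jsonschema.validate(body, schema['response_body'])


# Illustrative body-less schema: only the status code is pinned.
delete_snapshot = {'status_code': [202]}

validate_response(delete_snapshot, 202, '')   # passes, nothing else to check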
diff --git a/tempest/lib/services/volume/v3/transfers_client.py b/tempest/lib/services/volume/v3/transfers_client.py
index f572f95..cc4e1b2 100644
--- a/tempest/lib/services/volume/v3/transfers_client.py
+++ b/tempest/lib/services/volume/v3/transfers_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import transfers as schema
 from tempest.lib.common import rest_client
@@ -23,6 +24,8 @@
 class TransfersClient(rest_client.RestClient):
     """Client class to send CRUD Volume Transfer API requests"""
 
+    resource_path = 'os-volume-transfer'
+
     def create_volume_transfer(self, **kwargs):
         """Create a volume transfer.
 
@@ -31,14 +34,14 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume-transfer
         """
         post_body = json.dumps({'transfer': kwargs})
-        resp, body = self.post('os-volume-transfer', post_body)
+        resp, body = self.post(self.resource_path, post_body)
         body = json.loads(body)
         self.validate_response(schema.create_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_volume_transfer(self, transfer_id):
         """Returns the details of a volume transfer."""
-        url = "os-volume-transfer/%s" % transfer_id
+        url = "%s/%s" % (self.resource_path, transfer_id)
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.show_volume_transfer, resp, body)
@@ -52,7 +55,7 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-for-a-project
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-and-details
         """
-        url = 'os-volume-transfer'
+        url = self.resource_path
         schema_list_transfers = schema.list_volume_transfers_no_detail
         if detail:
             url += '/detail'
@@ -66,7 +69,7 @@
 
     def delete_volume_transfer(self, transfer_id):
         """Delete a volume transfer."""
-        resp, body = self.delete("os-volume-transfer/%s" % transfer_id)
+        resp, body = self.delete("%s/%s" % (self.resource_path, transfer_id))
         self.validate_response(schema.delete_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -77,9 +80,14 @@
         API reference:
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#accept-a-volume-transfer
         """
-        url = 'os-volume-transfer/%s/accept' % transfer_id
+        url = '%s/%s/accept' % (self.resource_path, transfer_id)
         post_body = json.dumps({'accept': kwargs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.validate_response(schema.accept_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
+
+
+class TransfersV355Client(TransfersClient):
+    """Client class to send CRUD for the "new" Transfers API (mv 3.55)"""
+    resource_path = 'volume-transfers'
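TransfersV355Client only overrides resource_path, so every inherited method targets the newer volume-transfers URLs (microversion 3.55) instead of os-volume-transfer. A standalone sketch of that override pattern with illustrative stand-in classes (no HTTP involved, not the Tempest clients themselves):

class FakeTransfersClient:
    resource_path = 'os-volume-transfer'

    def show_volume_transfer_url(self, transfer_id):
        # Same URL-building code for both clients; only the prefix differs.
        return '%s/%s' % (self.resource_path, transfer_id)


class FakeTransfersV355Client(FakeTransfersClient):
    resource_path = 'volume-transfers'


assert FakeTransfersClient().show_volume_transfer_url('abc') == \
    'os-volume-transfer/abc'
assert FakeTransfersV355Client().show_volume_transfer_url('abc') == \
    'volume-transfers/abc'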
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 7fa24a4..9858d87 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import volume_types as schema
 from tempest.lib.common import rest_client
@@ -65,6 +66,19 @@
         self.validate_response(schema.show_volume_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
+    def show_default_volume_type(self):
+        """Returns the details of a single volume type.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/block-storage/v3/index.html#show-default-volume-type
+        """
+        url = "types/default"
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(schema.show_volume_type, resp, body)
+        return rest_client.ResponseBody(resp, body)
+
     def create_volume_type(self, **kwargs):
         """Create volume type.
 
diff --git a/tempest/lib/services/volume/v3/versions_client.py b/tempest/lib/services/volume/v3/versions_client.py
index aa6c867..0bed827 100644
--- a/tempest/lib/services/volume/v3/versions_client.py
+++ b/tempest/lib/services/volume/v3/versions_client.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from six.moves.urllib.parse import urljoin
+from urllib.parse import urljoin
 
 from oslo_serialization import jsonutils as json
 
@@ -54,8 +54,9 @@
         """
 
         version_url = urljoin(self._get_base_version_url(), version + '/')
-        resp, body = self.raw_request(version_url, 'GET',
-                                      {'X-Auth-Token': self.token})
+        headers = self.get_headers()
+        headers['X-Auth-Token'] = self.token
+        resp, body = self.raw_request(version_url, 'GET', headers=headers)
         self._error_checker(resp, body)
         body = json.loads(body)
         self.validate_response(schema.volume_api_version_details, resp, body)
diff --git a/tempest/lib/services/volume/v3/volume_manage_client.py b/tempest/lib/services/volume/v3/volume_manage_client.py
index 85b1b82..f6642c5 100644
--- a/tempest/lib/services/volume/v3/volume_manage_client.py
+++ b/tempest/lib/services/volume/v3/volume_manage_client.py
@@ -15,6 +15,7 @@
 
 from oslo_serialization import jsonutils as json
 
+from tempest.lib.api_schema.response.volume import manage_volume as schema
 from tempest.lib.common import rest_client
 
 
@@ -30,6 +31,6 @@
         """
         post_body = json.dumps({'volume': kwargs})
         resp, body = self.post('os-volume-manage', post_body)
-        self.expected_success(202, resp.status)
         body = json.loads(body)
+        self.validate_response(schema.manage_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index 4fb6d2e..9c6fe68 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -13,10 +13,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_serialization import jsonutils as json
-import six
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.api_schema.response.volume import volumes as schema
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 from tempest.lib.services.volume import base_client
@@ -31,7 +32,7 @@
         If params is a string it will be left as it is, but if it's not it will
         be urlencoded.
         """
-        if isinstance(params, six.string_types):
+        if isinstance(params, str):
             return params
         return urllib.urlencode(params)
 
@@ -55,14 +56,16 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-accessible-volumes
         """
         url = 'volumes'
+        list_schema = schema.list_volumes_no_detail
         if detail:
+            list_schema = schema.list_volumes_with_detail
             url += '/detail'
         if params:
             url += '?%s' % self._prepare_params(params)
 
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(list_schema, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def migrate_volume(self, volume_id, **kwargs):
@@ -83,7 +86,7 @@
         url = "volumes/%s" % volume_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def create_volume(self, **kwargs):
@@ -96,7 +99,7 @@
         post_body = json.dumps({'volume': kwargs})
         resp, body = self.post('volumes', post_body)
         body = json.loads(body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.create_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_volume(self, volume_id, **kwargs):
@@ -109,7 +112,7 @@
         put_body = json.dumps({'volume': kwargs})
         resp, body = self.put('volumes/%s' % volume_id, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_volume(self, volume_id, **params):
@@ -123,7 +126,7 @@
         if params:
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.delete(url)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.delete_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_volume_summary(self, **params):
@@ -138,7 +141,7 @@
             url += '?%s' % urllib.urlencode(params)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_volume_summary, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def upload_volume(self, volume_id, **kwargs):
@@ -152,6 +155,10 @@
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
         body = json.loads(body)
+        # TODO(zhufl): This is under discussion, so will be merged
+        # in a separate patch.
+        # https://bugs.launchpad.net/cinder/+bug/1880566
+        # self.validate_response(schema.upload_volume, resp, body)
         self.expected_success(202, resp.status)
         return rest_client.ResponseBody(resp, body)
 
@@ -165,7 +172,7 @@
         post_body = json.dumps({'os-attach': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.attach_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def set_bootable_volume(self, volume_id, **kwargs):
@@ -178,7 +185,7 @@
         post_body = json.dumps({'os-set_bootable': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.set_bootable_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def detach_volume(self, volume_id):
@@ -186,7 +193,7 @@
         post_body = json.dumps({'os-detach': {}})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.detach_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def reserve_volume(self, volume_id):
@@ -194,7 +201,7 @@
         post_body = json.dumps({'os-reserve': {}})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.reserve_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def unreserve_volume(self, volume_id):
@@ -202,7 +209,7 @@
         post_body = json.dumps({'os-unreserve': {}})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.unreserve_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def is_resource_deleted(self, id):
@@ -219,7 +226,7 @@
         if volume["volume"]["status"] == "error_deleting":
             raise lib_exc.DeleteErrorException(
                 "Volume %s failed to delete and is in error_deleting status" %
-                volume['id'])
+                volume['volume']['id'])
         return False
 
     @property
@@ -237,7 +244,7 @@
         post_body = json.dumps({'os-extend': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.extend_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def reset_volume_status(self, volume_id, **kwargs):
@@ -249,7 +256,7 @@
         """
         post_body = json.dumps({'os-reset_status': kwargs})
         resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.reset_volume_status, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_volume_readonly(self, volume_id, **kwargs):
@@ -262,14 +269,14 @@
         post_body = json.dumps({'os-update_readonly_flag': kwargs})
         url = 'volumes/%s/action' % (volume_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.update_volume_readonly, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def force_delete_volume(self, volume_id):
         """Force Delete Volume."""
         post_body = json.dumps({'os-force_delete': {}})
         resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.force_delete_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def create_volume_metadata(self, volume_id, metadata):
@@ -283,7 +290,7 @@
         url = "volumes/%s/metadata" % volume_id
         resp, body = self.post(url, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.create_volume_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_volume_metadata(self, volume_id):
@@ -291,7 +298,7 @@
         url = "volumes/%s/metadata" % volume_id
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_volume_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_volume_metadata(self, volume_id, metadata):
@@ -305,7 +312,7 @@
         url = "volumes/%s/metadata" % volume_id
         resp, body = self.put(url, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_volume_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_volume_metadata_item(self, volume_id, id):
@@ -313,7 +320,7 @@
         url = "volumes/%s/metadata/%s" % (volume_id, id)
         resp, body = self.get(url)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_volume_metadata_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_volume_metadata_item(self, volume_id, id, meta_item):
@@ -322,14 +329,14 @@
         url = "volumes/%s/metadata/%s" % (volume_id, id)
         resp, body = self.put(url, put_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_volume_metadata_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_volume_metadata_item(self, volume_id, id):
         """Delete metadata item for the volume."""
         url = "volumes/%s/metadata/%s" % (volume_id, id)
         resp, body = self.delete(url)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.delete_volume_metadata_item, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def retype_volume(self, volume_id, **kwargs):
@@ -341,7 +348,7 @@
         """
         post_body = json.dumps({'os-retype': kwargs})
         resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.retype_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def force_detach_volume(self, volume_id, **kwargs):
@@ -354,7 +361,7 @@
         post_body = json.dumps({'os-force_detach': kwargs})
         url = 'volumes/%s/action' % volume_id
         resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.force_detach_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def update_volume_image_metadata(self, volume_id, **kwargs):
@@ -368,7 +375,7 @@
         url = "volumes/%s/action" % (volume_id)
         resp, body = self.post(url, post_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.update_volume_image_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def delete_volume_image_metadata(self, volume_id, key_name):
@@ -376,7 +383,7 @@
         post_body = json.dumps({'os-unset_image_metadata': {'key': key_name}})
         url = "volumes/%s/action" % (volume_id)
         resp, body = self.post(url, post_body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.delete_volume_image_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_volume_image_metadata(self, volume_id):
@@ -385,7 +392,7 @@
         url = "volumes/%s/action" % volume_id
         resp, body = self.post(url, post_body)
         body = json.loads(body)
-        self.expected_success(200, resp.status)
+        self.validate_response(schema.show_volume_image_metadata, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def unmanage_volume(self, volume_id):
@@ -397,5 +404,5 @@
         """
         post_body = json.dumps({'os-unmanage': {}})
         resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
+        self.validate_response(schema.unmanage_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
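Besides the schema-validation swaps, the one behavioural fix above is in is_resource_deleted: show_volume returns the decoded body with the volume document nested under a 'volume' key, so the error message has to read volume['volume']['id']. A standalone illustration of that nesting, with made-up data:

# show_volume() hands back the decoded body, which wraps the volume document
# under a 'volume' key; the values below are made up for illustration.
volume = {'volume': {'id': 'vol-123', 'status': 'error_deleting'}}

assert 'id' not in volume      # the old volume['id'] lookup would KeyError
if volume['volume']['status'] == 'error_deleting':
    message = ('Volume %s failed to delete and is in error_deleting status'
               % volume['volume']['id'])
    assert message.startswith('Volume vol-123')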
diff --git a/tempest/manager.py b/tempest/manager.py
deleted file mode 100644
index e3174d4..0000000
--- a/tempest/manager.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from tempest import clients as tempest_clients
-from tempest import config
-from tempest.lib.services import clients
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-class Manager(clients.ServiceClients):
-    """Service client manager class for backward compatibility
-
-    The former manager.Manager is not a stable interface in Tempest,
-    nonetheless it is consumed by a number of plugins already. This class
-    exists to provide some grace time for the move to tempest.lib.
-    """
-
-    def __init__(self, credentials, scope='project'):
-        msg = ("tempest.manager.Manager is not a stable interface and as such "
-               "it should not imported directly. It will be removed as "
-               "soon as the client manager becomes available in tempest.lib.")
-        LOG.warning(msg)
-        dscv = CONF.identity.disable_ssl_certificate_validation
-        _, uri = tempest_clients.get_auth_provider_class(credentials)
-        super(Manager, self).__init__(
-            credentials=credentials, scope=scope,
-            identity_uri=uri,
-            disable_ssl_certificate_validation=dscv,
-            ca_certs=CONF.identity.ca_certificates_file,
-            trace_requests=CONF.debug.trace_requests)
-
-
-def get_auth_provider(credentials, pre_auth=False, scope='project'):
-    """Shim to get_auth_provider in clients.py
-
-    get_auth_provider used to be hosted in this module, but it has been
-    moved to clients.py now as a more permanent location.
-    This module will be removed eventually, and this shim is only
-    maintained for the benefit of plugins already consuming this interface.
-    """
-    msg = ("tempest.manager.get_auth_provider is not a stable interface and "
-           "as such it should not imported directly. It will be removed as "
-           "the client manager becomes available in tempest.lib.")
-    LOG.warning(msg)
-    return tempest_clients.get_auth_provider(credentials=credentials,
-                                             pre_auth=pre_auth, scope=scope)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index cb7acbf..39021d5 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -14,9 +14,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import os
 import subprocess
 
 import netaddr
+
 from oslo_log import log
 from oslo_serialization import jsonutils as json
 from oslo_utils import netutils
@@ -45,7 +47,7 @@
 class ScenarioTest(tempest.test.BaseTestCase):
     """Base class for scenario tests. Uses tempest own clients. """
 
-    credentials = ['primary']
+    credentials = ['primary', 'admin']
 
     compute_min_microversion = None
     compute_max_microversion = LATEST_MICROVERSION
@@ -90,13 +92,29 @@
             volume_microversion=self.volume_request_microversion,
             placement_microversion=self.placement_request_microversion))
 
+    def setup_compute_client(cls):
+        """Compute client"""
+        cls.compute_images_client = cls.os_primary.compute_images_client
+        cls.keypairs_client = cls.os_primary.keypairs_client
+        cls.servers_client = cls.os_primary.servers_client
+        cls.interface_client = cls.os_primary.interfaces_client
+        cls.flavors_client = cls.os_primary.flavors_client
+
+    def setup_network_client(cls):
+        """Neutron network client"""
+        cls.networks_client = cls.os_primary.networks_client
+        cls.ports_client = cls.os_primary.ports_client
+        cls.routers_client = cls.os_primary.routers_client
+        cls.subnets_client = cls.os_primary.subnets_client
+        cls.floating_ips_client = cls.os_primary.floating_ips_client
+        cls.security_groups_client = cls.os_primary.security_groups_client
+        cls.security_group_rules_client = (
+            cls.os_primary.security_group_rules_client)
+
     @classmethod
     def setup_clients(cls):
+        """This setup the service clients for the tests"""
         super(ScenarioTest, cls).setup_clients()
-        # Clients (in alphabetical order)
-        cls.flavors_client = cls.os_primary.flavors_client
-        cls.compute_floating_ips_client = (
-            cls.os_primary.compute_floating_ips_client)
         if CONF.service_available.glance:
             # Check if glance v1 is available to determine which client to use.
             if CONF.image_feature_enabled.api_v1:
@@ -107,40 +125,33 @@
                 raise lib_exc.InvalidConfiguration(
                     'Either api_v1 or api_v2 must be True in '
                     '[image-feature-enabled].')
-        # Compute image client
-        cls.compute_images_client = cls.os_primary.compute_images_client
-        cls.keypairs_client = cls.os_primary.keypairs_client
-        # Nova security groups client
-        cls.compute_security_groups_client = (
-            cls.os_primary.compute_security_groups_client)
-        cls.compute_security_group_rules_client = (
-            cls.os_primary.compute_security_group_rules_client)
-        cls.servers_client = cls.os_primary.servers_client
-        cls.interface_client = cls.os_primary.interfaces_client
-        # Neutron network client
-        cls.networks_client = cls.os_primary.networks_client
-        cls.ports_client = cls.os_primary.ports_client
-        cls.routers_client = cls.os_primary.routers_client
-        cls.subnets_client = cls.os_primary.subnets_client
-        cls.floating_ips_client = cls.os_primary.floating_ips_client
-        cls.security_groups_client = cls.os_primary.security_groups_client
-        cls.security_group_rules_client = (
-            cls.os_primary.security_group_rules_client)
-        # Use the latest available volume clients
+
+        cls.setup_compute_client(cls)
+        cls.setup_network_client(cls)
         if CONF.service_available.cinder:
             cls.volumes_client = cls.os_primary.volumes_client_latest
             cls.snapshots_client = cls.os_primary.snapshots_client_latest
             cls.backups_client = cls.os_primary.backups_client_latest
 
     # ## Test functions library
-    #
     # The create_[resource] functions only return body and discard the
     # resp part which is not used in scenario tests
 
     def create_port(self, network_id, client=None, **kwargs):
+        """Creates port for the respective network_id
+
+        :param network_id: the id of the network
+        :param client: the client to use, defaults to self.ports_client
+        :param kwargs: additional arguments such as:
+            - namestart - a string to generate a name for the port from
+                          (default: self.__class__.__name__)
+            - 'binding:vnic_type' - defaults to CONF.network.port_vnic_type
+            - 'binding:profile' - defaults to CONF.network.port_profile
+        """
         if not client:
             client = self.ports_client
-        name = data_utils.rand_name(self.__class__.__name__)
+        name = data_utils.rand_name(
+            kwargs.pop('namestart', self.__class__.__name__))
         if CONF.network.port_vnic_type and 'binding:vnic_type' not in kwargs:
             kwargs['binding:vnic_type'] = CONF.network.port_vnic_type
         if CONF.network.port_profile and 'binding:profile' not in kwargs:
@@ -149,18 +160,27 @@
             name=name,
             network_id=network_id,
             **kwargs)
+        self.assertIsNotNone(result, 'Unable to allocate port')
         port = result['port']
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         client.delete_port, port['id'])
         return port
 
-    def create_keypair(self, client=None):
+    def create_keypair(self, client=None, **kwargs):
+        """Creates keypair
+
+        A keypair is the public key of an OpenSSH key pair, used for
+        accessing and creating servers.
+        A keypair can also be created from an existing private key.
+        Here, the public/private keys are randomly generated.
+        """
         if not client:
             client = self.keypairs_client
-        name = data_utils.rand_name(self.__class__.__name__)
+        if not kwargs.get('name'):
+            kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
         # We don't need to create a keypair by pubkey in scenario
-        body = client.create_keypair(name=name)
-        self.addCleanup(client.delete_keypair, name)
+        body = client.create_keypair(**kwargs)
+        self.addCleanup(client.delete_keypair, kwargs['name'])
         return body['keypair']
 
     def create_server(self, name=None, image_id=None, flavor=None,
@@ -186,6 +206,14 @@
                 direct: an SR-IOV port that is directly attached to a VM
                 macvtap: an SR-IOV port that is attached to a VM via a macvtap
                          device.
+                direct-physical: an SR-IOV port that is directly attached to a
+                                 VM using physical instead of virtual
+                                 functions.
+                baremetal: a baremetal port directly attached to a baremetal
+                           node.
+                virtio-forwarder:  an SR-IOV port that is indirectly attached
+                                   to a VM using a low-latency vhost-user
+                                   forwarding process.
               Defaults to ``CONF.network.port_vnic_type``.
             * *port_profile* (``dict``) --
               This attribute is a dictionary that can be used (with admin
@@ -193,6 +221,9 @@
               the port.
               example: port_profile = "capabilities:[switchdev]"
               Defaults to ``CONF.network.port_profile``.
+            * *create_port_body* (``dict``) --
+              This attribute is a dictionary of additional arguments to be
+              passed to create_port method.
         """
 
         # NOTE(jlanoux): As a first step, ssh checks in the scenario
@@ -218,7 +249,7 @@
         # every network
         if vnic_type or profile:
             ports = []
-            create_port_body = {}
+            create_port_body = kwargs.pop('create_port_body', {})
 
             if vnic_type:
                 create_port_body['binding:vnic_type'] = vnic_type
@@ -293,7 +324,14 @@
         return server
 
     def create_volume(self, size=None, name=None, snapshot_id=None,
-                      imageRef=None, volume_type=None):
+                      imageRef=None, volume_type=None, **kwargs):
+        """Creates volume
+
+        This wrapper utility creates a volume and waits for it to reach
+        the 'available' state. It then returns the volume's full
+        representation as fetched by a GET request.
+        """
+
         if size is None:
             size = CONF.volume.volume_size
         if imageRef:
@@ -306,11 +344,11 @@
             size = max(size, min_disk)
         if name is None:
             name = data_utils.rand_name(self.__class__.__name__ + "-volume")
-        kwargs = {'display_name': name,
-                  'snapshot_id': snapshot_id,
-                  'imageRef': imageRef,
-                  'volume_type': volume_type,
-                  'size': size}
+        kwargs.update({'name': name,
+                       'snapshot_id': snapshot_id,
+                       'imageRef': imageRef,
+                       'volume_type': volume_type,
+                       'size': size})
 
         if CONF.compute.compute_volume_common_az:
             kwargs.setdefault('availability_zone',
@@ -332,16 +370,35 @@
 
     def create_backup(self, volume_id, name=None, description=None,
                       force=False, snapshot_id=None, incremental=False,
-                      container=None):
+                      container=None, **kwargs):
+        """Creates a backup of the given volume_id or snapshot_id
+
+        This wrapper utility creates a backup and waits until it reaches
+        the 'available' state.
+
+        :param volume_id: UUID of the volume to back up
+        :param name: backup name, '$classname-backup' by default
+        :param description: Description of the backup, None by default
+        :param force: boolean, whether to back up even if the volume is
+            attached; False by default
+        :param snapshot_id: UUID of the source snapshot to back up
+            None by default
+        :param incremental: boolean, False by default
+        :param container: a container name, None by default
+        :param **kwargs: additional parameters per the documentation:
+            https://docs.openstack.org/api-ref/block-storage/v3/
+            #create-a-backup
+        """
 
         name = name or data_utils.rand_name(
             self.__class__.__name__ + "-backup")
-        kwargs = {'name': name,
-                  'description': description,
-                  'force': force,
-                  'snapshot_id': snapshot_id,
-                  'incremental': incremental,
-                  'container': container}
+        args = {'name': name,
+                'description': description,
+                'force': force,
+                'snapshot_id': snapshot_id,
+                'incremental': incremental,
+                'container': container}
+        args.update(kwargs)
         backup = self.backups_client.create_backup(volume_id=volume_id,
                                                    **kwargs)['backup']
         self.addCleanup(self.backups_client.delete_backup, backup['id'])
@@ -349,8 +406,20 @@
                                                 backup['id'], 'available')
         return backup
 
-    def restore_backup(self, backup_id):
-        restore = self.backups_client.restore_backup(backup_id)['restore']
+    def restore_backup(self, backup_id, **kwargs):
+        """Restores a backup given by the backup_id
+
+        This wrapper utility restores a backup and waits until it is in
+        'available' state.
+
+        :param backup_id: UUID of a backup to restore
+        :param **kwargs: additional parameters per the documentation:
+            https://docs.openstack.org/api-ref/block-storage/v3/
+            #restore-a-backup
+        """
+
+        body = self.backups_client.restore_backup(backup_id, **kwargs)
+        restore = body['restore']
         self.addCleanup(self.volumes_client.delete_volume,
                         restore['volume_id'])
         waiters.wait_for_volume_resource_status(self.backups_client,
@@ -361,16 +430,48 @@
         self.assertEqual(backup_id, restore['backup_id'])
         return restore
 
+    def rebuild_server(self, server_id, image=None, preserve_ephemeral=False,
+                       wait=True, **kwargs):
+        if image is None:
+            image = CONF.compute.image_ref
+        LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
+                  server_id, image, preserve_ephemeral)
+        self.servers_client.rebuild_server(
+            server_id=server_id,
+            image_ref=image,
+            preserve_ephemeral=preserve_ephemeral,
+            **kwargs)
+        if wait:
+            waiters.wait_for_server_status(self.servers_client,
+                                           server_id, 'ACTIVE')
+
     def create_volume_snapshot(self, volume_id, name=None, description=None,
-                               metadata=None, force=False):
+                               metadata=None, force=False, **kwargs):
+        """Creates volume's snapshot
+
+        This wrapper utility creates a volume snapshot and waits until it
+        reaches the 'available' state.
+
+        :param volume_id: UUID of a volume to create snapshot of
+        :param name: name of the snapshot, '$classname-snapshot' by default
+        :param description: description of the snapshot
+        :param metadata: metadata key and value pairs for the snapshot
+        :param force: whether to snapshot even when the volume is attached
+        :param **kwargs: additional parameters per the doc
+            https://docs.openstack.org/api-ref/block-storage/v3/
+            #create-a-snapshot
+        """
+
         name = name or data_utils.rand_name(
             self.__class__.__name__ + '-snapshot')
         snapshot = self.snapshots_client.create_snapshot(
             volume_id=volume_id,
             force=force,
-            display_name=name,
+            name=name,
             description=description,
-            metadata=metadata)['snapshot']
+            metadata=metadata,
+            **kwargs)['snapshot']
+
         self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                         snapshot['id'])
         self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
@@ -380,7 +481,7 @@
             snapshot['id'])['snapshot']
         return snapshot
 
-    def _cleanup_volume_type(self, volume_type):
+    def cleanup_volume_type(self, volume_type):
         """Clean up a given volume type.
 
         Ensuring all volumes associated to a type are first removed before
@@ -399,7 +500,25 @@
             admin_volumes_client.wait_for_resource_deletion(volume['id'])
         admin_volume_type_client.delete_volume_type(volume_type['id'])
 
-    def create_volume_type(self, client=None, name=None, backend_name=None):
+    def create_volume_type(self, client=None, name=None, backend_name=None,
+                           **kwargs):
+        """Creates volume type
+
+        In a multiple-storage back-end configuration,
+        each back end has a name (volume_backend_name).
+        The name of the back end is declared as an extra-specification
+        of a volume type (such as, volume_backend_name=LVM).
+        When a volume is created, the scheduler chooses an
+        appropriate back end to handle the request, according
+        to the volume type specified by the user.
+        The scheduler uses volume types to explicitly create volumes on
+        specific back ends.
+
+        Before it can be used, a volume type has to be declared
+        to Block Storage. In addition to that, an extra-specification
+        has to be created to link the volume type to a back end name.
+        """
+
         if not client:
             client = self.os_admin.volume_types_client_latest
         if not name:
@@ -409,775 +528,83 @@
 
         LOG.debug("Creating a volume type: %s on backend %s",
                   randomized_name, backend_name)
-        extra_specs = {}
+        extra_specs = kwargs.pop("extra_specs", {})
         if backend_name:
-            extra_specs = {"volume_backend_name": backend_name}
+            extra_specs.update({"volume_backend_name": backend_name})
 
-        volume_type = client.create_volume_type(
-            name=randomized_name, extra_specs=extra_specs)['volume_type']
-        self.addCleanup(self._cleanup_volume_type, volume_type)
+        volume_type_resp = client.create_volume_type(
+            name=randomized_name, extra_specs=extra_specs, **kwargs)
+        volume_type = volume_type_resp['volume_type']
+
+        self.assertIn('id', volume_type)
+        self.addCleanup(self.cleanup_volume_type, volume_type)
         return volume_type
 
-    def _create_loginable_secgroup_rule(self, secgroup_id=None):
-        _client = self.compute_security_groups_client
-        _client_rules = self.compute_security_group_rules_client
-        if secgroup_id is None:
-            sgs = _client.list_security_groups()['security_groups']
-            for sg in sgs:
-                if sg['name'] == 'default':
-                    secgroup_id = sg['id']
-
-        # These rules are intended to permit inbound ssh and icmp
-        # traffic from all sources, so no group_id is provided.
-        # Setting a group_id would only permit traffic from ports
-        # belonging to the same security group.
-        rulesets = [
-            {
-                # ssh
-                'ip_protocol': 'tcp',
-                'from_port': 22,
-                'to_port': 22,
-                'cidr': '0.0.0.0/0',
-            },
-            {
-                # ping
-                'ip_protocol': 'icmp',
-                'from_port': -1,
-                'to_port': -1,
-                'cidr': '0.0.0.0/0',
-            }
-        ]
-        rules = list()
-        for ruleset in rulesets:
-            sg_rule = _client_rules.create_security_group_rule(
-                parent_group_id=secgroup_id, **ruleset)['security_group_rule']
-            rules.append(sg_rule)
-        return rules
-
-    def _create_security_group(self):
-        # Create security group
-        sg_name = data_utils.rand_name(self.__class__.__name__)
-        sg_desc = sg_name + " description"
-        secgroup = self.compute_security_groups_client.create_security_group(
-            name=sg_name, description=sg_desc)['security_group']
-        self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(secgroup['description'], sg_desc)
-        self.addCleanup(
-            test_utils.call_and_ignore_notfound_exc,
-            self.compute_security_groups_client.delete_security_group,
-            secgroup['id'])
-
-        # Add rules to the security group
-        self._create_loginable_secgroup_rule(secgroup['id'])
-
-        return secgroup
-
-    def get_remote_client(self, ip_address, username=None, private_key=None,
-                          server=None):
-        """Get a SSH client to a remote server
-
-        :param ip_address: the server floating or fixed IP address to use
-                           for ssh validation
-        :param username: name of the Linux account on the remote server
-        :param private_key: the SSH private key to use
-        :param server: server dict, used for debugging purposes
-        :return: a RemoteClient object
-        """
-
-        if username is None:
-            username = CONF.validation.image_ssh_user
-        # Set this with 'keypair' or others to log in with keypair or
-        # username/password.
-        if CONF.validation.auth_method == 'keypair':
-            password = None
-            if private_key is None:
-                private_key = self.keypair['private_key']
-        else:
-            password = CONF.validation.image_ssh_password
-            private_key = None
-        linux_client = remote_client.RemoteClient(
-            ip_address, username, pkey=private_key, password=password,
-            server=server, servers_client=self.servers_client)
-        linux_client.validate_authentication()
-        return linux_client
-
-    def _image_create(self, name, fmt, path,
-                      disk_format=None, properties=None):
-        if properties is None:
-            properties = {}
-        name = data_utils.rand_name('%s-' % name)
-        params = {
-            'name': name,
-            'container_format': fmt,
-            'disk_format': disk_format or fmt,
-        }
-        if CONF.image_feature_enabled.api_v1:
-            params['is_public'] = 'False'
-            params['properties'] = properties
-            params = {'headers': common_image.image_meta_to_headers(**params)}
-        else:
-            params['visibility'] = 'private'
-            # Additional properties are flattened out in the v2 API.
-            params.update(properties)
-        body = self.image_client.create_image(**params)
-        image = body['image'] if 'image' in body else body
-        self.addCleanup(self.image_client.delete_image, image['id'])
-        self.assertEqual("queued", image['status'])
-        with open(path, 'rb') as image_file:
-            if CONF.image_feature_enabled.api_v1:
-                self.image_client.update_image(image['id'], data=image_file)
-            else:
-                self.image_client.store_image_file(image['id'], image_file)
-        return image['id']
-
-    def glance_image_create(self):
-        img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
-        aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
-        ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
-        ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
-        img_container_format = CONF.scenario.img_container_format
-        img_disk_format = CONF.scenario.img_disk_format
-        img_properties = CONF.scenario.img_properties
-        LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, "
-                  "properties: %s, ami: %s, ari: %s, aki: %s",
-                  img_path, img_container_format, img_disk_format,
-                  img_properties, ami_img_path, ari_img_path, aki_img_path)
-        try:
-            image = self._image_create('scenario-img',
-                                       img_container_format,
-                                       img_path,
-                                       disk_format=img_disk_format,
-                                       properties=img_properties)
-        except IOError:
-            LOG.warning(
-                "A(n) %s image was not found. Retrying with uec image.",
-                img_disk_format)
-            kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
-            ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
-            properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
-            image = self._image_create('scenario-ami', 'ami',
-                                       path=ami_img_path,
-                                       properties=properties)
-        LOG.debug("image:%s", image)
-
-        return image
-
-    def _log_console_output(self, servers=None, client=None):
-        if not CONF.compute_feature_enabled.console_output:
-            LOG.debug('Console output not supported, cannot log')
-            return
-        client = client or self.servers_client
-        if not servers:
-            servers = client.list_servers()
-            servers = servers['servers']
-        for server in servers:
-            try:
-                console_output = client.get_console_output(
-                    server['id'])['output']
-                LOG.debug('Console output for %s\nbody=\n%s',
-                          server['id'], console_output)
-            except lib_exc.NotFound:
-                LOG.debug("Server %s disappeared(deleted) while looking "
-                          "for the console log", server['id'])
-
-    def _log_net_info(self, exc):
-        # network debug is called as part of ssh init
-        if not isinstance(exc, lib_exc.SSHTimeout):
-            LOG.debug('Network information on a devstack host')
-
-    def create_server_snapshot(self, server, name=None):
-        # Glance client
-        _image_client = self.image_client
-        # Compute client
-        _images_client = self.compute_images_client
-        if name is None:
-            name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
-        LOG.debug("Creating a snapshot image for server: %s", server['name'])
-        image = _images_client.create_image(server['id'], name=name)
-        image_id = image.response['location'].split('images/')[1]
-        waiters.wait_for_image_status(_image_client, image_id, 'active')
-
-        self.addCleanup(_image_client.wait_for_resource_deletion,
-                        image_id)
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        _image_client.delete_image, image_id)
-
-        if CONF.image_feature_enabled.api_v1:
-            # In glance v1 the additional properties are stored in the headers.
-            resp = _image_client.check_image(image_id)
-            snapshot_image = common_image.get_image_meta_from_headers(resp)
-            image_props = snapshot_image.get('properties', {})
-        else:
-            # In glance v2 the additional properties are flattened.
-            snapshot_image = _image_client.show_image(image_id)
-            image_props = snapshot_image
-
-        bdm = image_props.get('block_device_mapping')
-        if bdm:
-            bdm = json.loads(bdm)
-            if bdm and 'snapshot_id' in bdm[0]:
-                snapshot_id = bdm[0]['snapshot_id']
-                self.addCleanup(
-                    self.snapshots_client.wait_for_resource_deletion,
-                    snapshot_id)
-                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                                self.snapshots_client.delete_snapshot,
-                                snapshot_id)
-                waiters.wait_for_volume_resource_status(self.snapshots_client,
-                                                        snapshot_id,
-                                                        'available')
-        image_name = snapshot_image['name']
-        self.assertEqual(name, image_name)
-        LOG.debug("Created snapshot image %s for server %s",
-                  image_name, server['name'])
-        return snapshot_image
-
-    def nova_volume_attach(self, server, volume_to_attach):
-        volume = self.servers_client.attach_volume(
-            server['id'], volumeId=volume_to_attach['id'])['volumeAttachment']
-        self.assertEqual(volume_to_attach['id'], volume['id'])
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'in-use')
-
-        # Return the updated volume after the attachment
-        return self.volumes_client.show_volume(volume['id'])['volume']
-
-    def nova_volume_detach(self, server, volume):
-        self.servers_client.detach_volume(server['id'], volume['id'])
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'available')
-
-    def ping_ip_address(self, ip_address, should_succeed=True,
-                        ping_timeout=None, mtu=None, server=None):
-        timeout = ping_timeout or CONF.validation.ping_timeout
-        cmd = ['ping', '-c1', '-w1']
-
-        if mtu:
-            cmd += [
-                # don't fragment
-                '-M', 'do',
-                # ping receives just the size of ICMP payload
-                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
-            ]
-        cmd.append(ip_address)
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.communicate()
-
-            return (proc.returncode == 0) == should_succeed
-
-        caller = test_utils.find_test_caller()
-        LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
-                  ' expected result is %(should_succeed)s', {
-                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
-                      'should_succeed':
-                      'reachable' if should_succeed else 'unreachable'
-                  })
-        result = test_utils.call_until_true(ping, timeout, 1)
-        LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
-                  'ping result is %(result)s', {
-                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
-                      'result': 'expected' if result else 'unexpected'
-                  })
-        if server:
-            self._log_console_output([server])
-        return result
-
-    def check_vm_connectivity(self, ip_address,
-                              username=None,
-                              private_key=None,
-                              should_connect=True,
-                              extra_msg="",
-                              server=None,
-                              mtu=None):
-        """Check server connectivity
-
-        :param ip_address: server to test against
-        :param username: server's ssh username
-        :param private_key: server's ssh private key to be used
-        :param should_connect: True/False indicates positive/negative test
-            positive - attempt ping and ssh
-            negative - attempt ping and fail if succeed
-        :param extra_msg: Message to help with debugging if ``ping_ip_address``
-            fails
-        :param server: The server whose console to log for debugging
-        :param mtu: network MTU to use for connectivity validation
-
-        :raises: AssertError if the result of the connectivity check does
-            not match the value of the should_connect param
-        """
-        LOG.debug('checking network connections to IP %s with user: %s',
-                  ip_address, username)
-        if should_connect:
-            msg = "Timed out waiting for %s to become reachable" % ip_address
-        else:
-            msg = "ip address %s is reachable" % ip_address
-        if extra_msg:
-            msg = "%s\n%s" % (extra_msg, msg)
-        self.assertTrue(self.ping_ip_address(ip_address,
-                                             should_succeed=should_connect,
-                                             mtu=mtu, server=server),
-                        msg=msg)
-        if should_connect:
-            # no need to check ssh for negative connectivity
-            try:
-                self.get_remote_client(ip_address, username, private_key,
-                                       server=server)
-            except Exception:
-                if not extra_msg:
-                    extra_msg = 'Failed to ssh to %s' % ip_address
-                LOG.exception(extra_msg)
-                raise
-
-    def create_floating_ip(self, thing, pool_name=None):
-        """Create a floating IP and associates to a server on Nova"""
-
-        if not pool_name:
-            pool_name = CONF.network.floating_network_name
-        floating_ip = (self.compute_floating_ips_client.
-                       create_floating_ip(pool=pool_name)['floating_ip'])
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.compute_floating_ips_client.delete_floating_ip,
-                        floating_ip['id'])
-        self.compute_floating_ips_client.associate_floating_ip_to_server(
-            floating_ip['ip'], thing['id'])
-        return floating_ip
-
-    def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
-                         private_key=None, server=None):
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key,
-                                            server=server)
-        if dev_name is not None:
-            ssh_client.make_fs(dev_name)
-            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
-                                                               mount_path))
-        cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
-        ssh_client.exec_command(cmd_timestamp)
-        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
-                                            % mount_path)
-        if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
-        return timestamp
-
-    def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
-                      private_key=None, server=None):
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key,
-                                            server=server)
-        if dev_name is not None:
-            ssh_client.mount(dev_name, mount_path)
-        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
-                                            % mount_path)
-        if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
-        return timestamp
-
-    def get_server_ip(self, server):
-        """Get the server fixed or floating IP.
-
-        Based on the configuration we're in, return a correct ip
-        address for validating that a guest is up.
-        """
-        if CONF.validation.connect_method == 'floating':
-            # The tests calling this method don't have a floating IP
-            # and can't make use of the validation resources. So the
-            # method is creating the floating IP there.
-            return self.create_floating_ip(server)['ip']
-        elif CONF.validation.connect_method == 'fixed':
-            # Determine the network name to look for based on config or creds
-            # provider network resources.
-            if CONF.validation.network_for_ssh:
-                addresses = server['addresses'][
-                    CONF.validation.network_for_ssh]
-            else:
-                network = self.get_tenant_network()
-                addresses = (server['addresses'][network['name']]
-                             if network else [])
-            for address in addresses:
-                if (address['version'] == CONF.validation.ip_version_for_ssh and  # noqa
-                        address['OS-EXT-IPS:type'] == 'fixed'):
-                    return address['addr']
-            raise exceptions.ServerUnreachable(server_id=server['id'])
-        else:
-            raise lib_exc.InvalidConfiguration()
-
-    @classmethod
-    def get_host_for_server(cls, server_id):
-        server_details = cls.os_admin.servers_client.show_server(server_id)
-        return server_details['server']['OS-EXT-SRV-ATTR:host']
-
-    def _get_bdm(self, source_id, source_type, delete_on_termination=False):
-        bd_map_v2 = [{
-            'uuid': source_id,
-            'source_type': source_type,
-            'destination_type': 'volume',
-            'boot_index': 0,
-            'delete_on_termination': delete_on_termination}]
-        return {'block_device_mapping_v2': bd_map_v2}
-
-    def boot_instance_from_resource(self, source_id,
-                                    source_type,
-                                    keypair=None,
-                                    security_group=None,
-                                    delete_on_termination=False,
-                                    name=None):
-        create_kwargs = dict()
-        if keypair:
-            create_kwargs['key_name'] = keypair['name']
-        if security_group:
-            create_kwargs['security_groups'] = [
-                {'name': security_group['name']}]
-        create_kwargs.update(self._get_bdm(
-            source_id,
-            source_type,
-            delete_on_termination=delete_on_termination))
-        if name:
-            create_kwargs['name'] = name
-
-        return self.create_server(image_id='', **create_kwargs)
-
-    def create_volume_from_image(self):
-        img_uuid = CONF.compute.image_ref
-        vol_name = data_utils.rand_name(
-            self.__class__.__name__ + '-volume-origin')
-        return self.create_volume(name=vol_name, imageRef=img_uuid)
-
-
-class NetworkScenarioTest(ScenarioTest):
-    """Base class for network scenario tests.
-
-    This class provide helpers for network scenario tests, using the neutron
-    API. Helpers from ancestor which use the nova network API are overridden
-    with the neutron API.
-
-    This Class also enforces using Neutron instead of novanetwork.
-    Subclassed tests will be skipped if Neutron is not enabled
-
-    """
-
-    credentials = ['primary', 'admin']
-
-    @classmethod
-    def skip_checks(cls):
-        super(NetworkScenarioTest, cls).skip_checks()
-        if not CONF.service_available.neutron:
-            raise cls.skipException('Neutron not available')
-
-    def _create_network(self, networks_client=None,
-                        tenant_id=None,
-                        namestart='network-smoke-',
-                        port_security_enabled=True, **net_dict):
-        if not networks_client:
-            networks_client = self.networks_client
-        if not tenant_id:
-            tenant_id = networks_client.tenant_id
-        name = data_utils.rand_name(namestart)
-        network_kwargs = dict(name=name, tenant_id=tenant_id)
-        if net_dict:
-            network_kwargs.update(net_dict)
-        # Neutron disables port security by default so we have to check the
-        # config before trying to create the network with port_security_enabled
-        if CONF.network_feature_enabled.port_security:
-            network_kwargs['port_security_enabled'] = port_security_enabled
-        result = networks_client.create_network(**network_kwargs)
-        network = result['network']
-
-        self.assertEqual(network['name'], name)
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        networks_client.delete_network,
-                        network['id'])
-        return network
-
-    def create_subnet(self, network, subnets_client=None,
-                      namestart='subnet-smoke', **kwargs):
-        """Create a subnet for the given network
-
-        within the cidr block configured for tenant networks.
-        """
-        if not subnets_client:
-            subnets_client = self.subnets_client
-
-        def cidr_in_use(cidr, tenant_id):
-            """Check cidr existence
-
-            :returns: True if subnet with cidr already exist in tenant
-                  False else
-            """
-            cidr_in_use = self.os_admin.subnets_client.list_subnets(
-                tenant_id=tenant_id, cidr=cidr)['subnets']
-            return len(cidr_in_use) != 0
-
-        ip_version = kwargs.pop('ip_version', 4)
-
-        if ip_version == 6:
-            tenant_cidr = netaddr.IPNetwork(
-                CONF.network.project_network_v6_cidr)
-            num_bits = CONF.network.project_network_v6_mask_bits
-        else:
-            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            num_bits = CONF.network.project_network_mask_bits
-
-        result = None
-        str_cidr = None
-        # Repeatedly attempt subnet creation with sequential cidr
-        # blocks until an unallocated block is found.
-        for subnet_cidr in tenant_cidr.subnet(num_bits):
-            str_cidr = str(subnet_cidr)
-            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
-                continue
-
-            subnet = dict(
-                name=data_utils.rand_name(namestart),
-                network_id=network['id'],
-                tenant_id=network['tenant_id'],
-                cidr=str_cidr,
-                ip_version=ip_version,
-                **kwargs
-            )
-            try:
-                result = subnets_client.create_subnet(**subnet)
-                break
-            except lib_exc.Conflict as e:
-                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
-                if not is_overlapping_cidr:
-                    raise
-        self.assertIsNotNone(result, 'Unable to allocate tenant network')
-
-        subnet = result['subnet']
-        self.assertEqual(subnet['cidr'], str_cidr)
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        subnets_client.delete_subnet, subnet['id'])
-
-        return subnet
-
-    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
-        if ip_addr:
-            ports = self.os_admin.ports_client.list_ports(
-                device_id=server['id'],
-                fixed_ips='ip_address=%s' % ip_addr)['ports']
-        else:
-            ports = self.os_admin.ports_client.list_ports(
-                device_id=server['id'])['ports']
-        # A port can have more than one IP address in some cases.
-        # If the network is dual-stack (IPv4 + IPv6), this port is associated
-        # with 2 subnets
-        p_status = ['ACTIVE']
-        # NOTE(vsaienko) With Ironic, instances live on separate hardware
-        # servers. Neutron does not bind ports for Ironic instances, as a
-        # result the port remains in the DOWN state.
-        # TODO(vsaienko) remove once bug: #1599836 is resolved.
-        if getattr(CONF.service_available, 'ironic', False):
-            p_status.append('DOWN')
-        port_map = [(p["id"], fxip["ip_address"])
-                    for p in ports
-                    for fxip in p["fixed_ips"]
-                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
-                        p['status'] in p_status)]
-        inactive = [p for p in ports if p['status'] != 'ACTIVE']
-        if inactive:
-            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
-        self.assertNotEmpty(port_map,
-                            "No IPv4 addresses found in: %s" % ports)
-        self.assertEqual(len(port_map), 1,
-                         "Found multiple IPv4 addresses: %s. "
-                         "Unable to determine which port to target."
-                         % port_map)
-        return port_map[0]
-
-    def _get_network_by_name(self, network_name):
-        net = self.os_admin.networks_client.list_networks(
-            name=network_name)['networks']
-        self.assertNotEmpty(net,
-                            "Unable to get network by name: %s" % network_name)
-        return net[0]
-
-    def create_floating_ip(self, thing, external_network_id=None,
-                           port_id=None, client=None):
-        """Create a floating IP and associates to a resource/port on Neutron"""
-        if not external_network_id:
-            external_network_id = CONF.network.public_network_id
-        if not client:
-            client = self.floating_ips_client
-        if not port_id:
-            port_id, ip4 = self._get_server_port_id_and_ip4(thing)
-        else:
-            ip4 = None
-        result = client.create_floatingip(
-            floating_network_id=external_network_id,
-            port_id=port_id,
-            tenant_id=thing['tenant_id'],
-            fixed_ip_address=ip4
-        )
-        floating_ip = result['floatingip']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_floatingip,
-                        floating_ip['id'])
-        return floating_ip
-
-    def check_floating_ip_status(self, floating_ip, status):
-        """Verifies floatingip reaches the given status
-
-        :param dict floating_ip: floating IP dict to check status
-        :param status: target status
-        :raises: AssertionError if status doesn't match
-        """
-        floatingip_id = floating_ip['id']
-
-        def refresh():
-            result = (self.floating_ips_client.
-                      show_floatingip(floatingip_id)['floatingip'])
-            return status == result['status']
-
-        if not test_utils.call_until_true(refresh,
-                                          CONF.network.build_timeout,
-                                          CONF.network.build_interval):
-            floating_ip = self.floating_ips_client.show_floatingip(
-                floatingip_id)['floatingip']
-            self.assertEqual(status, floating_ip['status'],
-                             message="FloatingIP: {fp} is at status: {cst}. "
-                                     "failed  to reach status: {st}"
-                             .format(fp=floating_ip, cst=floating_ip['status'],
-                                     st=status))
-        LOG.info("FloatingIP: {fp} is at status: {st}"
-                 .format(fp=floating_ip, st=status))
-
-    def check_tenant_network_connectivity(self, server,
-                                          username,
-                                          private_key,
-                                          should_connect=True,
-                                          servers_for_debug=None):
-        if not CONF.network.project_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            for ip_addresses in server['addresses'].values():
-                for ip_address in ip_addresses:
-                    self.check_vm_connectivity(ip_address['addr'],
-                                               username,
-                                               private_key,
-                                               should_connect=should_connect)
-        except Exception as e:
-            LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers_for_debug)
-            self._log_net_info(e)
-            raise
-
-    def check_remote_connectivity(self, source, dest, should_succeed=True,
-                                  nic=None, protocol='icmp'):
-        """check server connectivity via source ssh connection
-
-        :param source: RemoteClient: an ssh connection from which to execute
-            the check
-        :param dest: an IP to check connectivity against
-        :param should_succeed: boolean should connection succeed or not
-        :param nic: specific network interface to test connectivity from
-        :param protocol: the protocol used to test connectivity with.
-        :returns: True, if the connection succeeded and it was expected to
-            succeed. False otherwise.
-        """
-        method_name = '%s_check' % protocol
-        connectivity_checker = getattr(source, method_name)
-
-        def connect_remote():
-            try:
-                connectivity_checker(dest, nic=nic)
-            except lib_exc.SSHExecCommandFailed:
-                LOG.warning('Failed to check %(protocol)s connectivity for '
-                            'IP %(dest)s via a ssh connection from: %(src)s.',
-                            dict(protocol=protocol, dest=dest,
-                                 src=source.ssh_client.host))
-                return not should_succeed
-            return should_succeed
-
-        result = test_utils.call_until_true(connect_remote,
-                                            CONF.validation.ping_timeout, 1)
-        if result:
-            return
-
-        source_host = source.ssh_client.host
-        if should_succeed:
-            msg = "Timed out waiting for %s to become reachable from %s" \
-                % (dest, source_host)
-        else:
-            msg = "%s is reachable from %s" % (dest, source_host)
-        self._log_console_output()
-        self.fail(msg)
-
-    def _create_security_group(self, security_group_rules_client=None,
-                               tenant_id=None,
-                               namestart='secgroup-smoke',
-                               security_groups_client=None):
+    def create_security_group(self, security_group_rules_client=None,
+                              project_id=None,
+                              namestart='secgroup-smoke',
+                              security_groups_client=None):
         if security_group_rules_client is None:
             security_group_rules_client = self.security_group_rules_client
         if security_groups_client is None:
             security_groups_client = self.security_groups_client
-        if tenant_id is None:
-            tenant_id = security_groups_client.tenant_id
-        secgroup = self._create_empty_security_group(
+        if project_id is None:
+            project_id = security_groups_client.project_id
+        secgroup = self.create_empty_security_group(
             namestart=namestart, client=security_groups_client,
-            tenant_id=tenant_id)
+            project_id=project_id)
 
         # Add rules to the security group
-        rules = self._create_loginable_secgroup_rule(
+        rules = self.create_loginable_secgroup_rule(
             security_group_rules_client=security_group_rules_client,
             secgroup=secgroup,
             security_groups_client=security_groups_client)
         for rule in rules:
-            self.assertEqual(tenant_id, rule['tenant_id'])
+            self.assertEqual(project_id, rule['project_id'])
             self.assertEqual(secgroup['id'], rule['security_group_id'])
         return secgroup
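    As a usage illustration only (editorial, not part of this change), a hypothetical test method on a ScenarioTest subclass might consume the renamed helper like this; create_keypair is assumed to be the usual keypair helper of this class:

        def test_server_with_loginable_secgroup(self):  # hypothetical example
            keypair = self.create_keypair()             # assumed ScenarioTest helper
            secgroup = self.create_security_group()     # SSH/ICMP rules added above
            self.create_server(
                key_name=keypair['name'],
                security_groups=[{'name': secgroup['name']}])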
 
-    def _create_empty_security_group(self, client=None, tenant_id=None,
-                                     namestart='secgroup-smoke'):
+    def create_empty_security_group(self, client=None, project_id=None,
+                                    namestart='secgroup-smoke'):
         """Create a security group without rules.
 
         Default rules will be created:
          - IPv4 egress to any
          - IPv6 egress to any
-
-        :param tenant_id: secgroup will be created in this tenant
+        :param project_id: secgroup will be created in this project
         :returns: the created security group
         """
+
         if client is None:
             client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
+        if not project_id:
+            project_id = client.project_id
         sg_name = data_utils.rand_name(namestart)
         sg_desc = sg_name + " description"
         sg_dict = dict(name=sg_name,
                        description=sg_desc)
-        sg_dict['tenant_id'] = tenant_id
+        sg_dict['project_id'] = project_id
         result = client.create_security_group(**sg_dict)
 
         secgroup = result['security_group']
         self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(tenant_id, secgroup['tenant_id'])
+        self.assertEqual(project_id, secgroup['project_id'])
         self.assertEqual(secgroup['description'], sg_desc)
 
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         client.delete_security_group, secgroup['id'])
         return secgroup
 
-    def _create_security_group_rule(self, secgroup=None,
-                                    sec_group_rules_client=None,
-                                    tenant_id=None,
-                                    security_groups_client=None, **kwargs):
+    def create_security_group_rule(self, secgroup=None,
+                                   sec_group_rules_client=None,
+                                   project_id=None,
+                                   security_groups_client=None, **kwargs):
         """Create a rule from a dictionary of rule parameters.
 
         Create a rule in a secgroup. if secgroup not defined will search for
-        default secgroup in tenant_id.
-
+        default secgroup in project_id.
         :param secgroup: the security group.
-        :param tenant_id: if secgroup not passed -- the tenant in which to
+        :param project_id: if secgroup not passed -- the tenant in which to
             search for default secgroup
         :param kwargs: a dictionary containing rule parameters:
             for example, to allow incoming ssh:
@@ -1188,22 +615,23 @@
                     port_range_max: 22
                     }
         """
+
         if sec_group_rules_client is None:
             sec_group_rules_client = self.security_group_rules_client
         if security_groups_client is None:
             security_groups_client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = security_groups_client.tenant_id
+        if not project_id:
+            project_id = security_groups_client.project_id
         if secgroup is None:
-            # Get default secgroup for tenant_id
+            # Get default secgroup for project_id
             default_secgroups = security_groups_client.list_security_groups(
-                name='default', tenant_id=tenant_id)['security_groups']
-            msg = "No default security group for tenant %s." % (tenant_id)
+                name='default', project_id=project_id)['security_groups']
+            msg = "No default security group for project %s." % (project_id)
             self.assertNotEmpty(default_secgroups, msg)
             secgroup = default_secgroups[0]
 
         ruleset = dict(security_group_id=secgroup['id'],
-                       tenant_id=secgroup['tenant_id'])
+                       project_id=secgroup['project_id'])
         ruleset.update(kwargs)
 
         sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
@@ -1214,10 +642,10 @@
 
         return sg_rule
 
-    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
-                                        secgroup=None,
-                                        security_groups_client=None):
-        """Create loginable security group rule
+    def create_loginable_secgroup_rule(self, security_group_rules_client=None,
+                                       secgroup=None,
+                                       security_groups_client=None):
+        """Create loginable security group rule by neutron clients by default.
 
         This function will create:
         1. egress and ingress tcp port 22 allow rule in order to allow ssh
@@ -1253,7 +681,7 @@
             for r_direction in ['ingress', 'egress']:
                 ruleset['direction'] = r_direction
                 try:
-                    sg_rule = self._create_security_group_rule(
+                    sg_rule = self.create_security_group_rule(
                         sec_group_rules_client=sec_group_rules_client,
                         secgroup=secgroup,
                         security_groups_client=security_groups_client,
@@ -1269,7 +697,762 @@
 
         return rules
 
-    def _get_router(self, client=None, tenant_id=None):
+    def get_remote_client(self, ip_address, username=None, private_key=None,
+                          server=None):
+        """Get a SSH client to a remote server
+
+        :param ip_address: the server floating or fixed IP address to use
+                           for ssh validation
+        :param username: name of the Linux account on the remote server
+        :param private_key: the SSH private key to use
+        :param server: server dict, used for debugging purposes
+        :return: a RemoteClient object
+        """
+
+        if username is None:
+            username = CONF.validation.image_ssh_user
+        # Set this with 'keypair' or others to log in with keypair or
+        # username/password.
+        if CONF.validation.auth_method == 'keypair':
+            password = None
+            if private_key is None:
+                private_key = self.keypair['private_key']
+        else:
+            password = CONF.validation.image_ssh_password
+            private_key = None
+        linux_client = remote_client.RemoteClient(
+            ip_address, username, pkey=private_key, password=password,
+            server=server, servers_client=self.servers_client)
+        linux_client.validate_authentication()
+        return linux_client
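    Illustrative sketch only (not part of the patch): a caller usually validates SSH access and then runs commands through the returned RemoteClient, with ip_address, keypair and server coming from the test's own fixtures:

        ssh_client = self.get_remote_client(ip_address,
                                            private_key=keypair['private_key'],
                                            server=server)
        # exec_command is the same RemoteClient call used by the timestamp
        # helpers further down in this class.
        hostname = ssh_client.exec_command('hostname').strip()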
+
+    def image_create(self, name='scenario-img', **kwargs):
+        img_path = CONF.scenario.img_file
+        if not os.path.exists(img_path):
+            raise lib_exc.InvalidConfiguration(
+                'Starting with the Tempest 25.0.0 release, '
+                'CONF.scenario.img_file needs a full path for the image. '
+                'CONF.scenario.img_dir was deprecated and will be removed '
+                'in the next release. Until Tempest 25.0.0 the old behavior '
+                'was maintained and kept working, but starting with Tempest '
+                '26.0.0 you need to specify the full path in the '
+                'CONF.scenario.img_file config option.')
+        img_container_format = CONF.scenario.img_container_format
+        img_disk_format = CONF.scenario.img_disk_format
+        img_properties = CONF.scenario.img_properties
+        LOG.debug("paths: img: %s, container_format: %s, disk_format: %s, "
+                  "properties: %s",
+                  img_path, img_container_format, img_disk_format,
+                  img_properties)
+        if img_properties is None:
+            img_properties = {}
+        name = data_utils.rand_name('%s-' % name)
+        params = {
+            'name': name,
+            'container_format': img_container_format,
+            'disk_format': img_disk_format or img_container_format,
+        }
+        if CONF.image_feature_enabled.api_v1:
+            params['is_public'] = 'False'
+            if img_properties:
+                params['properties'] = img_properties
+            params = {'headers': common_image.image_meta_to_headers(**params)}
+        else:
+            params['visibility'] = 'private'
+            # Additional properties are flattened out in the v2 API.
+            if img_properties:
+                params.update(img_properties)
+        params.update(kwargs)
+        body = self.image_client.create_image(**params)
+        image = body['image'] if 'image' in body else body
+        self.addCleanup(self.image_client.delete_image, image['id'])
+        self.assertEqual("queued", image['status'])
+        with open(img_path, 'rb') as image_file:
+            if CONF.image_feature_enabled.api_v1:
+                self.image_client.update_image(image['id'], data=image_file)
+            else:
+                self.image_client.store_image_file(image['id'], image_file)
+        LOG.debug("image:%s", image['id'])
+        return image['id']
+
+    def log_console_output(self, servers=None, client=None, **kwargs):
+        """Console log output"""
+        if not CONF.compute_feature_enabled.console_output:
+            LOG.debug('Console output not supported, cannot log')
+            return
+        client = client or self.servers_client
+        if not servers:
+            servers = client.list_servers()
+            servers = servers['servers']
+        for server in servers:
+            try:
+                console_output = client.get_console_output(
+                    server['id'], **kwargs)['output']
+                LOG.debug('Console output for %s\nbody=\n%s',
+                          server['id'], console_output)
+            except lib_exc.NotFound:
+                LOG.debug("Server %s disappeared(deleted) while looking "
+                          "for the console log", server['id'])
+
+    def _log_net_info(self, exc):
+        """network debug is called as part of ssh init"""
+        if not isinstance(exc, lib_exc.SSHTimeout):
+            LOG.debug('Network information on a devstack host')
+
+    def create_server_snapshot(self, server, name=None, **kwargs):
+        """Creates server snapshot"""
+        # Glance client
+        _image_client = self.image_client
+        # Compute client
+        _images_client = self.compute_images_client
+        if name is None:
+            name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
+        LOG.debug("Creating a snapshot image for server: %s", server['name'])
+        image = _images_client.create_image(server['id'], name=name, **kwargs)
+        image_id = image.response['location'].split('images/')[1]
+        waiters.wait_for_image_status(_image_client, image_id, 'active')
+
+        self.addCleanup(_image_client.wait_for_resource_deletion,
+                        image_id)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        _image_client.delete_image, image_id)
+
+        if CONF.image_feature_enabled.api_v1:
+            # In glance v1 the additional properties are stored in the headers
+            resp = _image_client.check_image(image_id)
+            snapshot_image = common_image.get_image_meta_from_headers(resp)
+            image_props = snapshot_image.get('properties', {})
+        else:
+            # In glance v2 the additional properties are flattened.
+            snapshot_image = _image_client.show_image(image_id)
+            image_props = snapshot_image
+
+        bdm = image_props.get('block_device_mapping')
+        if bdm:
+            bdm = json.loads(bdm)
+            if bdm and 'snapshot_id' in bdm[0]:
+                snapshot_id = bdm[0]['snapshot_id']
+                self.addCleanup(
+                    self.snapshots_client.wait_for_resource_deletion,
+                    snapshot_id)
+                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                                self.snapshots_client.delete_snapshot,
+                                snapshot_id)
+                waiters.wait_for_volume_resource_status(self.snapshots_client,
+                                                        snapshot_id,
+                                                        'available')
+        image_name = snapshot_image['name']
+        self.assertEqual(name, image_name)
+        LOG.debug("Created snapshot image %s for server %s",
+                  image_name, server['name'])
+        return snapshot_image
+
+    def nova_volume_attach(self, server, volume_to_attach, **kwargs):
+        """Compute volume attach
+
+        This utility attaches the volume to the server via the compute API
+        and waits for the volume status to become 'in-use'.
+        """
+        volume = self.servers_client.attach_volume(
+            server['id'], volumeId=volume_to_attach['id'],
+            **kwargs)['volumeAttachment']
+        self.assertEqual(volume_to_attach['id'], volume['id'])
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'in-use')
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.nova_volume_detach, server, volume)
+        # Return the updated volume after the attachment
+        return self.volumes_client.show_volume(volume['id'])['volume']
+
+    def nova_volume_detach(self, server, volume):
+        """Compute volume detach
+
+        This utility detaches the volume from the server and waits until
+        the volume attachment has been removed from Nova.
+        """
+        self.servers_client.detach_volume(server['id'], volume['id'])
+        waiters.wait_for_volume_attachment_remove_from_server(
+            self.servers_client, server['id'], volume['id'])
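    A minimal sketch (editorial, not in the patch) of how the attach/detach pair is typically exercised, assuming the usual create_server/create_volume helpers referenced elsewhere in this class:

        server = self.create_server()
        volume = self.create_volume()
        attached = self.nova_volume_attach(server, volume)  # waits for 'in-use'
        # Explicit detach; a cleanup performing the same detach was also
        # registered by nova_volume_attach above.
        self.nova_volume_detach(server, attached)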
+
+    def ping_ip_address(self, ip_address, should_succeed=True,
+                        ping_timeout=None, mtu=None, server=None):
+        """ping ip address"""
+        timeout = ping_timeout or CONF.validation.ping_timeout
+        cmd = ['ping', '-c1', '-w1']
+
+        if mtu:
+            cmd += [
+                # don't fragment
+                '-M', 'do',
+                # ping receives just the size of ICMP payload
+                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
+            ]
+        cmd.append(ip_address)
+
+        def ping():
+            proc = subprocess.Popen(cmd,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+            proc.communicate()
+
+            return (proc.returncode == 0) == should_succeed
+
+        caller = test_utils.find_test_caller()
+        LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
+                  ' expected result is %(should_succeed)s', {
+                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
+                      'should_succeed':
+                      'reachable' if should_succeed else 'unreachable'
+                  })
+        result = test_utils.call_until_true(ping, timeout, 1)
+        LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
+                  'ping result is %(result)s', {
+                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
+                      'result': 'expected' if result else 'unexpected'
+                  })
+        if server:
+            self.log_console_output([server])
+        return result
+
+    def check_vm_connectivity(self, ip_address,
+                              username=None,
+                              private_key=None,
+                              should_connect=True,
+                              extra_msg="",
+                              server=None,
+                              mtu=None):
+        """Check server connectivity
+
+        :param ip_address: server to test against
+        :param username: server's ssh username
+        :param private_key: server's ssh private key to be used
+        :param should_connect: True/False indicates positive/negative test
+            positive - attempt ping and ssh
+            negative - attempt ping and fail if succeed
+        :param extra_msg: Message to help with debugging if ``ping_ip_address``
+            fails
+        :param server: The server whose console to log for debugging
+        :param mtu: network MTU to use for connectivity validation
+
+        :raises: AssertError if the result of the connectivity check does
+            not match the value of the should_connect param
+        """
+
+        LOG.debug('checking network connections to IP %s with user: %s',
+                  ip_address, username)
+        if should_connect:
+            msg = "Timed out waiting for %s to become reachable" % ip_address
+        else:
+            msg = "ip address %s is reachable" % ip_address
+        if extra_msg:
+            msg = "%s\n%s" % (extra_msg, msg)
+        self.assertTrue(self.ping_ip_address(ip_address,
+                                             should_succeed=should_connect,
+                                             mtu=mtu, server=server),
+                        msg=msg)
+        if should_connect:
+            # no need to check ssh for negative connectivity
+            try:
+                self.get_remote_client(ip_address, username, private_key,
+                                       server=server)
+            except Exception:
+                if not extra_msg:
+                    extra_msg = 'Failed to ssh to %s' % ip_address
+                LOG.exception(extra_msg)
+                raise
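    Editorial sketch of a typical positive connectivity check built from the helpers above, assuming validation resources (keypair, floating IP) are available to the test:

        ip = self.get_server_ip(server)
        self.check_vm_connectivity(ip,
                                   username=CONF.validation.image_ssh_user,
                                   private_key=keypair['private_key'],
                                   server=server,
                                   should_connect=True)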
+
+    def get_server_port_id_and_ip4(self, server, ip_addr=None, **kwargs):
+
+        if ip_addr and not kwargs.get('fixed_ips'):
+            kwargs['fixed_ips'] = 'ip_address=%s' % ip_addr
+        ports = self.os_admin.ports_client.list_ports(
+            device_id=server['id'], **kwargs)['ports']
+
+        # A port can have more than one IP address in some cases.
+        # If the network is dual-stack (IPv4 + IPv6), this port is associated
+        # with 2 subnets
+
+        def _is_active(port):
+            # NOTE(vsaienko) With Ironic, instances live on separate hardware
+            # servers. Neutron does not bind ports for Ironic instances, as a
+            # result the port remains in the DOWN state. This has been fixed
+            # with the introduction of the networking-baremetal plugin but
+            # it's not mandatory (and is not used on all stable branches).
+            return (port['status'] == 'ACTIVE' or
+                    port.get('binding:vnic_type') == 'baremetal')
+
+        port_map = [(p["id"], fxip["ip_address"])
+                    for p in ports
+                    for fxip in p["fixed_ips"]
+                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
+                        _is_active(p))]
+        inactive = [p for p in ports if p['status'] != 'ACTIVE']
+        if inactive:
+            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
+
+        self.assertNotEmpty(port_map,
+                            "No IPv4 addresses found in: %s" % ports)
+        self.assertEqual(len(port_map), 1,
+                         "Found multiple IPv4 addresses: %s. "
+                         "Unable to determine which port to target."
+                         % port_map)
+        return port_map[0]
+
+    def create_floating_ip(self, server, external_network_id=None,
+                           port_id=None, client=None, **kwargs):
+        """Create a floating IP and associates to a resource/port on Neutron"""
+
+        if not external_network_id:
+            external_network_id = CONF.network.public_network_id
+        if not client:
+            client = self.floating_ips_client
+        if not port_id:
+            port_id, ip4 = self.get_server_port_id_and_ip4(server)
+        else:
+            ip4 = None
+
+        floatingip_kwargs = {
+            'floating_network_id': external_network_id,
+            'port_id': port_id,
+            'tenant_id': server.get('project_id') or server['tenant_id'],
+            'fixed_ip_address': ip4,
+        }
+        if CONF.network.subnet_id:
+            floatingip_kwargs['subnet_id'] = CONF.network.subnet_id
+
+        floatingip_kwargs.update(kwargs)
+        result = client.create_floatingip(**floatingip_kwargs)
+        floating_ip = result['floatingip']
+
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        client.delete_floatingip,
+                        floating_ip['id'])
+        return floating_ip
+
+    def associate_floating_ip(self, floating_ip, server):
+        """Associate floating ip to server
+
+        This wrapper utility attaches the floating_ip for
+        the respective port_id of server
+        """
+        port_id, _ = self.get_server_port_id_and_ip4(server)
+        kwargs = dict(port_id=port_id)
+        floating_ip = self.floating_ips_client.update_floatingip(
+            floating_ip['id'], **kwargs)['floatingip']
+        self.assertEqual(port_id, floating_ip['port_id'])
+        return floating_ip
+
+    def disassociate_floating_ip(self, floating_ip):
+        """Disassociates floating ip
+
+        This wrapper utility disassociates given floating ip.
+        :param floating_ip: a dict which is a return value of
+        floating_ips_client.create_floatingip method
+        """
+        kwargs = dict(port_id=None)
+        floating_ip = self.floating_ips_client.update_floatingip(
+            floating_ip['id'], **kwargs)['floatingip']
+        self.assertIsNone(floating_ip['port_id'])
+        return floating_ip
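    Illustrative only: the floating-IP helpers above are typically combined as create, disassociate, re-associate, with the address read from the returned dict:

        fip = self.create_floating_ip(server)          # allocate and associate
        fip = self.disassociate_floating_ip(fip)       # 'port_id' becomes None
        fip = self.associate_floating_ip(fip, server)  # re-attach to the server port
        address = fip['floating_ip_address']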
+
+    def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
+                         private_key=None, server=None, username=None,
+                         fs='ext4'):
+        """Creates timestamp
+
+        This wrapper utility does ssh, creates timestamp and returns the
+        created timestamp.
+        """
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server,
+                                            username=username)
+
+        if dev_name is not None:
+            ssh_client.make_fs(dev_name, fs=fs)
+            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
+                                                               mount_path))
+        cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
+        ssh_client.exec_command(cmd_timestamp)
+        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
+                                            % mount_path)
+        if dev_name is not None:
+            ssh_client.exec_command('sudo umount %s' % mount_path)
+        return timestamp
+
+    def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
+                      private_key=None, server=None, username=None):
+        """Returns timestamp
+
+        This wrapper utility does ssh and returns the timestamp.
+
+        :param ip_address: The floating IP or fixed IP of the remote server
+        :param dev_name: Name of the device that stores the timestamp
+        :param mount_path: Path which should be used as mount point for
+                           dev_name
+        :param private_key: The SSH private key to use for authentication
+        :param server: Server dict, used for debugging purposes
+        :param username: Name of the Linux account on the remote server
+        """
+
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server,
+                                            username=username)
+
+        if dev_name is not None:
+            ssh_client.mount(dev_name, mount_path)
+        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
+                                            % mount_path)
+        if dev_name is not None:
+            ssh_client.exec_command('sudo umount %s' % mount_path)
+        return timestamp
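    Editorial sketch: the timestamp pair is normally used to verify that guest data survives an operation such as a reboot, migration or snapshot/restore performed between the two calls:

        ts_before = self.create_timestamp(ip, private_key=keypair['private_key'],
                                          server=server)
        # ... disruptive operation under test happens here ...
        ts_after = self.get_timestamp(ip, private_key=keypair['private_key'],
                                      server=server)
        self.assertEqual(ts_before, ts_after)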
+
+    def get_server_ip(self, server, **kwargs):
+        """Get the server fixed or floating IP.
+
+        Based on the configuration we're in, return a correct ip
+        address for validating that a guest is up.
+
+        If CONF.validation.connect_method is floating, then
+        a floating ip will be created passing kwargs as additional
+        argument.
+        """
+
+        if CONF.validation.connect_method == 'floating':
+            # The tests calling this method don't have a floating IP
+            # and can't make use of the validation resources. So the
+            # method is creating the floating IP there.
+            return self.create_floating_ip(
+                server, **kwargs)['floating_ip_address']
+        elif CONF.validation.connect_method == 'fixed':
+            # Determine the network name to look for based on config or creds
+            # provider network resources.
+            if CONF.validation.network_for_ssh:
+                addresses = server['addresses'][
+                    CONF.validation.network_for_ssh]
+            else:
+                network = self.get_tenant_network()
+                addresses = (server['addresses'][network['name']]
+                             if network else [])
+            for address in addresses:
+                if (address['version'] == CONF.validation.ip_version_for_ssh and  # noqa
+                        address['OS-EXT-IPS:type'] == 'fixed'):
+                    return address['addr']
+            raise exceptions.ServerUnreachable(server_id=server['id'])
+        else:
+            raise lib_exc.InvalidConfiguration()
+
+    @classmethod
+    def get_host_for_server(cls, server_id):
+        """Gets host of server"""
+
+        server_details = cls.os_admin.servers_client.show_server(server_id)
+        return server_details['server']['OS-EXT-SRV-ATTR:host']
+
+    def _get_bdm(self, source_id, source_type, delete_on_termination=False):
+        bd_map_v2 = [{
+            'uuid': source_id,
+            'source_type': source_type,
+            'destination_type': 'volume',
+            'boot_index': 0,
+            'delete_on_termination': delete_on_termination}]
+        return {'block_device_mapping_v2': bd_map_v2}
+
+    def boot_instance_from_resource(self, source_id,
+                                    source_type,
+                                    keypair=None,
+                                    security_group=None,
+                                    delete_on_termination=False,
+                                    name=None, **kwargs):
+        """Boot instance from resource
+
+        This wrapper utility boots instance from resource with block device
+        mapping with source info passed in arguments
+        """
+
+        create_kwargs = dict({'image_id': ''})
+        if keypair:
+            create_kwargs['key_name'] = keypair['name']
+        if security_group:
+            create_kwargs['security_groups'] = [
+                {'name': security_group['name']}]
+        create_kwargs.update(self._get_bdm(
+            source_id,
+            source_type,
+            delete_on_termination=delete_on_termination))
+        if name:
+            create_kwargs['name'] = name
+        create_kwargs.update(kwargs)
+
+        return self.create_server(**create_kwargs)
+
+    def create_volume_from_image(self, **kwargs):
+        """Create volume from image.
+
+        :param image_id: ID of the image to create volume from,
+            CONF.compute.image_ref by default
+        :param name: name of the volume,
+            '$classname-volume-origin' by default
+        :param **kwargs: additional parameters
+        """
+        image_id = kwargs.pop('image_id', CONF.compute.image_ref)
+        name = kwargs.pop('name', None)
+        if not name:
+            namestart = self.__class__.__name__ + '-volume-origin'
+            name = data_utils.rand_name(namestart)
+        return self.create_volume(name=name, imageRef=image_id, **kwargs)
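    Illustrative combination of the two helpers above (not part of the patch): boot a server whose root disk is a volume created from the configured image:

        volume = self.create_volume_from_image()
        server = self.boot_instance_from_resource(
            source_id=volume['id'],
            source_type='volume',
            delete_on_termination=True)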
+
+
+class NetworkScenarioTest(ScenarioTest):
+    """Base class for network scenario tests.
+
+    This class provides helpers for network scenario tests, using the
+    neutron API. Helpers from the ancestor class which use the nova network
+    API are overridden with the neutron API.
+
+    This class also enforces using Neutron instead of nova-network.
+    Subclassed tests will be skipped if Neutron is not enabled.
+
+    """
+
+    @classmethod
+    def skip_checks(cls):
+        super(NetworkScenarioTest, cls).skip_checks()
+        if not CONF.service_available.neutron:
+            raise cls.skipException('Neutron not available')
+
+    def create_network(self, networks_client=None,
+                       project_id=None,
+                       namestart='network-smoke-',
+                       port_security_enabled=True, **net_dict):
+        if not networks_client:
+            networks_client = self.networks_client
+        if not project_id:
+            project_id = networks_client.project_id
+        name = data_utils.rand_name(namestart)
+        network_kwargs = dict(name=name, project_id=project_id)
+        if net_dict:
+            network_kwargs.update(net_dict)
+        # Neutron disables port security by default so we have to check the
+        # config before trying to create the network with port_security_enabled
+        if CONF.network_feature_enabled.port_security:
+            network_kwargs['port_security_enabled'] = port_security_enabled
+        result = networks_client.create_network(**network_kwargs)
+        network = result['network']
+
+        self.assertEqual(network['name'], name)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        networks_client.delete_network,
+                        network['id'])
+        return network
+
+    def create_subnet(self, network, subnets_client=None,
+                      namestart='subnet-smoke', **kwargs):
+        """Create a subnet for the given network
+
+        This utility creates a subnet for the given network within the
+        cidr block configured for tenant networks.
+
+        :param **kwargs:
+            See extra parameters below
+
+        :Keyword Arguments:
+
+            * *ip_version* -- IP version of the subnet (defaults to 4)
+            * *use_default_subnetpool* -- allocate the cidr from the default
+              subnetpool, e.g. to manage the IPv6 address range
+        """
+
+        if not subnets_client:
+            subnets_client = self.subnets_client
+
+        def cidr_in_use(cidr, project_id):
+            """Check cidr existence
+
+            :returns: True if a subnet with the cidr already exists in the
+                tenant or in an external network, False otherwise
+            """
+            tenant_subnets = self.os_admin.subnets_client.list_subnets(
+                project_id=project_id, cidr=cidr)['subnets']
+            external_nets = self.os_admin.networks_client.list_networks(
+                **{"router:external": True})['networks']
+            external_subnets = []
+            for ext_net in external_nets:
+                external_subnets.extend(
+                    self.os_admin.subnets_client.list_subnets(
+                        network_id=ext_net['id'], cidr=cidr)['subnets'])
+            return len(tenant_subnets + external_subnets) != 0
+
+        def _make_create_subnet_request(namestart, network,
+                                        ip_version, subnets_client, **kwargs):
+
+            subnet = dict(
+                name=data_utils.rand_name(namestart),
+                network_id=network['id'],
+                project_id=network['project_id'],
+                ip_version=ip_version,
+                **kwargs
+            )
+
+            if ip_version == 6:
+                subnet['ipv6_address_mode'] = 'slaac'
+                subnet['ipv6_ra_mode'] = 'slaac'
+
+            try:
+                return subnets_client.create_subnet(**subnet)
+            except lib_exc.Conflict as e:
+                if 'overlaps with another subnet' not in str(e):
+                    raise
+
+        result = None
+        str_cidr = None
+
+        use_default_subnetpool = kwargs.get('use_default_subnetpool', False)
+        ip_version = kwargs.pop('ip_version', 4)
+
+        if not use_default_subnetpool:
+
+            if ip_version == 6:
+                tenant_cidr = netaddr.IPNetwork(
+                    CONF.network.project_network_v6_cidr)
+                num_bits = CONF.network.project_network_v6_mask_bits
+            else:
+                tenant_cidr = netaddr.IPNetwork(
+                    CONF.network.project_network_cidr)
+                num_bits = CONF.network.project_network_mask_bits
+
+            # Repeatedly attempt subnet creation with sequential cidr
+            # blocks until an unallocated block is found.
+            for subnet_cidr in tenant_cidr.subnet(num_bits):
+                str_cidr = str(subnet_cidr)
+                if cidr_in_use(str_cidr, project_id=network['project_id']):
+                    continue
+                result = _make_create_subnet_request(
+                    namestart, network, ip_version, subnets_client,
+                    cidr=str_cidr, **kwargs)
+
+                if result is not None:
+                    break
+
+        else:
+            result = _make_create_subnet_request(
+                namestart, network, ip_version, subnets_client,
+                **kwargs)
+        self.assertIsNotNone(result, 'Unable to allocate tenant network')
+
+        subnet = result['subnet']
+        if str_cidr is not None:
+            self.assertEqual(subnet['cidr'], str_cidr)
+
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        subnets_client.delete_subnet, subnet['id'])
+
+        return subnet
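    Editorial usage sketch of the project-scoped network helpers above; keyword arguments such as ip_version pass straight through to the subnet request:

        network = self.create_network()
        subnet_v4 = self.create_subnet(network)                # IPv4, sequential cidr
        subnet_v6 = self.create_subnet(network, ip_version=6)  # SLAAC modes set above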
+
+    def get_network_by_name(self, network_name):
+        net = self.os_admin.networks_client.list_networks(
+            name=network_name)['networks']
+        self.assertNotEmpty(net,
+                            "Unable to get network by name: %s" % network_name)
+        return net[0]
+
+    def check_floating_ip_status(self, floating_ip, status):
+        """Verifies floatingip reaches the given status
+
+        :param dict floating_ip: floating IP dict to check status
+        :param status: target status
+        :raises: AssertionError if status doesn't match
+        """
+
+        floatingip_id = floating_ip['id']
+
+        def refresh():
+            floating_ip = (self.floating_ips_client.
+                           show_floatingip(floatingip_id)['floatingip'])
+            if status == floating_ip['status']:
+                LOG.info("FloatingIP: {fp} is at status: {st}"
+                         .format(fp=floating_ip, st=status))
+            return status == floating_ip['status']
+
+        if not test_utils.call_until_true(refresh,
+                                          CONF.network.build_timeout,
+                                          CONF.network.build_interval):
+            floating_ip = self.floating_ips_client.show_floatingip(
+                floatingip_id)['floatingip']
+            self.assertEqual(status, floating_ip['status'],
+                             message="FloatingIP: {fp} is at status: {cst}. "
+                                     "Failed to reach status: {st}"
+                             .format(fp=floating_ip, cst=floating_ip['status'],
+                                     st=status))
+
+    def check_tenant_network_connectivity(self, server,
+                                          username,
+                                          private_key,
+                                          should_connect=True,
+                                          servers_for_debug=None):
+        """Checks tenant network connectivity"""
+        if not CONF.network.project_networks_reachable:
+            msg = 'Tenant networks not configured to be reachable.'
+            LOG.info(msg)
+            return
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        try:
+            for ip_addresses in server['addresses'].values():
+                for ip_address in ip_addresses:
+                    self.check_vm_connectivity(ip_address['addr'],
+                                               username,
+                                               private_key,
+                                               should_connect=should_connect)
+        except Exception as e:
+            LOG.exception('Tenant network connectivity check failed')
+            self.log_console_output(servers_for_debug)
+            self._log_net_info(e)
+            raise
+
+    def check_remote_connectivity(self, source, dest, should_succeed=True,
+                                  nic=None, protocol='icmp'):
+        """check server connectivity via source ssh connection
+
+        :param source: RemoteClient: an ssh connection from which to execute
+            the check
+        :param dest: an IP to check connectivity against
+        :param should_succeed: boolean should connection succeed or not
+        :param nic: specific network interface to test connectivity from
+        :param protocol: the protocol used to test connectivity with.
+        :returns: None if the observed connectivity matches should_succeed;
+            otherwise the test fails with a descriptive message.
+        """
+
+        method_name = '%s_check' % protocol
+        connectivity_checker = getattr(source, method_name)
+
+        def connect_remote():
+            try:
+                connectivity_checker(dest, nic=nic)
+            except lib_exc.SSHExecCommandFailed:
+                LOG.warning('Failed to check %(protocol)s connectivity for '
+                            'IP %(dest)s via a ssh connection from: %(src)s.',
+                            dict(protocol=protocol, dest=dest,
+                                 src=source.ssh_client.host))
+                return not should_succeed
+            return should_succeed
+
+        result = test_utils.call_until_true(connect_remote,
+                                            CONF.validation.ping_timeout, 1)
+        if result:
+            return
+
+        source_host = source.ssh_client.host
+        if should_succeed:
+            msg = "Timed out waiting for %s to become reachable from %s" \
+                % (dest, source_host)
+        else:
+            msg = "%s is reachable from %s" % (dest, source_host)
+        self.log_console_output()
+        self.fail(msg)
+
+    def get_router(self, client=None, project_id=None, **kwargs):
         """Retrieve a router for the given tenant id.
 
         If a public router has been configured, it will be returned.
@@ -1278,21 +1461,31 @@
         network has, a tenant router will be created and returned that
         routes traffic to the public network.
         """
+
         if not client:
             client = self.routers_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
+        if not project_id:
+            project_id = client.project_id
         router_id = CONF.network.public_router_id
         network_id = CONF.network.public_network_id
         if router_id:
             body = client.show_router(router_id)
             return body['router']
         elif network_id:
+            name = kwargs.pop('name', None)
+            if not name:
+                namestart = self.__class__.__name__ + '-router'
+                name = data_utils.rand_name(namestart)
+
+            ext_gw_info = kwargs.pop('external_gateway_info', None)
+            if not ext_gw_info:
+                ext_gw_info = dict(network_id=network_id)
             router = client.create_router(
-                name=data_utils.rand_name(self.__class__.__name__ + '-router'),
-                admin_state_up=True,
-                tenant_id=tenant_id,
-                external_gateway_info=dict(network_id=network_id))['router']
+                name=name,
+                admin_state_up=kwargs.get('admin_state_up', True),
+                project_id=project_id,
+                external_gateway_info=ext_gw_info,
+                **kwargs)['router']
             self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                             client.delete_router, router['id'])
             return router
@@ -1300,16 +1493,17 @@
             raise Exception("Neither of 'public_router_id' or "
                             "'public_network_id' has been defined.")
 
-    def create_networks(self, networks_client=None,
-                        routers_client=None, subnets_client=None,
-                        tenant_id=None, dns_nameservers=None,
-                        port_security_enabled=True, **net_dict):
+    def setup_network_subnet_with_router(
+            self, networks_client=None,
+            routers_client=None, subnets_client=None,
+            project_id=None, dns_nameservers=None,
+            port_security_enabled=True, **net_dict):
         """Create a network with a subnet connected to a router.
 
         The baremetal driver is a special case since all nodes are
         on the same shared network.
 
-        :param tenant_id: id of tenant to create resources in.
+        :param project_id: id of project to create resources in.
         :param dns_nameservers: list of dns servers to send to subnet.
         :param port_security_enabled: whether or not port_security is enabled
         :param net_dict: a dict containing experimental network information in
@@ -1318,6 +1512,7 @@
                                    'provider:segmentation_id': '42'}
         :returns: network, subnet, router
         """
+
         if CONF.network.shared_physical_network:
             # NOTE(Shrews): This exception is for environments where tenant
             # credential isolation is available, but network separation is
@@ -1327,18 +1522,18 @@
             if not CONF.compute.fixed_network_name:
                 m = 'fixed_network_name must be specified in config'
                 raise lib_exc.InvalidConfiguration(m)
-            network = self._get_network_by_name(
+            network = self.get_network_by_name(
                 CONF.compute.fixed_network_name)
             router = None
             subnet = None
         else:
-            network = self._create_network(
+            network = self.create_network(
                 networks_client=networks_client,
-                tenant_id=tenant_id,
+                project_id=project_id,
                 port_security_enabled=port_security_enabled,
                 **net_dict)
-            router = self._get_router(client=routers_client,
-                                      tenant_id=tenant_id)
+            router = self.get_router(client=routers_client,
+                                     project_id=project_id)
             subnet_kwargs = dict(network=network,
                                  subnets_client=subnets_client)
             # use explicit check because empty list is a valid option
@@ -1362,8 +1557,6 @@
 class EncryptionScenarioTest(ScenarioTest):
     """Base class for encryption scenario tests"""
 
-    credentials = ['primary', 'admin']
-
     @classmethod
     def setup_clients(cls):
         super(EncryptionScenarioTest, cls).setup_clients()
@@ -1374,6 +1567,7 @@
     def create_encryption_type(self, client=None, type_id=None, provider=None,
                                key_size=None, cipher=None,
                                control_location=None):
+        """Creates an encryption type for volume"""
         if not client:
             client = self.admin_encryption_types_client
         if not type_id:
@@ -1387,6 +1581,7 @@
     def create_encrypted_volume(self, encryption_provider, volume_type,
                                 key_size=256, cipher='aes-xts-plain64',
                                 control_location='front-end'):
+        """Creates an encrypted volume"""
         volume_type = self.create_volume_type(name=volume_type)
         self.create_encryption_type(type_id=volume_type['id'],
                                     provider=encryption_provider,
@@ -1403,6 +1598,8 @@
     class.
     """
 
+    credentials = ['primary']
+
     @classmethod
     def skip_checks(cls):
         super(ObjectStorageScenarioTest, cls).skip_checks()
@@ -1427,11 +1624,12 @@
         cls.object_client = cls.os_operator.object_client
 
     def get_swift_stat(self):
-        """get swift status for our user account."""
+        """Get swift status for our user account."""
         self.account_client.list_account_containers()
         LOG.debug('Swift status information obtained successfully')
 
     def create_container(self, container_name=None):
+        """Creates container"""
         name = container_name or data_utils.rand_name(
             'swift-scenario-container')
         self.container_client.update_container(name)
@@ -1444,10 +1642,12 @@
         return name
 
     def delete_container(self, container_name):
+        """Deletes container"""
         self.container_client.delete_container(container_name)
         LOG.debug('Container %s deleted', container_name)
 
     def upload_object_to_container(self, container_name, obj_name=None):
+        """Uploads object to container"""
         obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
         obj_data = data_utils.random_bytes()
         self.object_client.create_object(container_name, obj_name, obj_data)
@@ -1458,6 +1658,7 @@
         return obj_name, obj_data
 
     def delete_object(self, container_name, filename):
+        """Deletes object"""
         self.object_client.delete_object(container_name, filename)
         self.list_and_check_container_objects(container_name,
                                               not_present_obj=[filename])
@@ -1465,8 +1666,13 @@
     def list_and_check_container_objects(self, container_name,
                                          present_obj=None,
                                          not_present_obj=None):
-        # List objects for a given container and assert which are present and
-        # which are not.
+        """List and verify objects for a given container
+
+        This utility lists objects for a given container
+        and asserts which are present and
+        which are not
+        """
+
         if present_obj is None:
             present_obj = []
         if not_present_obj is None:
@@ -1481,5 +1687,6 @@
                 self.assertNotIn(obj, object_list)
 
     def download_and_verify(self, container_name, obj_name, expected_data):
+        """Asserts the object and expected data to verify if they are same"""
         _, obj = self.object_client.get_object(container_name, obj_name)
         self.assertEqual(obj, expected_data)
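
For reference, the create_subnet() helper added above walks the configured project CIDR in fixed-size blocks and retries until it finds one that is not already allocated. Below is a minimal standalone sketch of that scanning step, assuming netaddr is installed and using a made-up used_cidrs set in place of the Neutron lookups performed by cidr_in_use():

import netaddr

# Hypothetical stand-in for the Neutron lookups done by cidr_in_use().
used_cidrs = {'10.1.0.0/26', '10.1.0.64/26'}

def pick_free_subnet(project_cidr='10.1.0.0/20', mask_bits=26):
    """Return the first /mask_bits block of project_cidr not already used."""
    for candidate in netaddr.IPNetwork(project_cidr).subnet(mask_bits):
        if str(candidate) not in used_cidrs:
            return str(candidate)
    return None

print(pick_free_subnet())  # prints '10.1.0.128/26' with the data above
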
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index b515639..58e234f 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -51,10 +51,27 @@
         return aggregate
 
     def _get_host_name(self):
+        # Find a host that has not been added to another availability zone,
+        # since a host cannot belong to more than one availability zone.
         svc_list = self.services_client.list_services(
             binary='nova-compute')['services']
         self.assertNotEmpty(svc_list)
-        return svc_list[0]['host']
+        hosts_available = []
+        for host in svc_list:
+            if (host['state'] == 'up' and host['status'] == 'enabled'):
+                hosts_available.append(host['host'])
+        aggregates = self.aggregates_client.list_aggregates()['aggregates']
+        hosts_in_zone = []
+        for agg in aggregates:
+            if agg['availability_zone']:
+                hosts_in_zone.extend(agg['hosts'])
+        hosts = [v for v in hosts_available if v not in hosts_in_zone]
+        if not hosts:
+            raise self.skipException("All hosts are already in other "
+                                     "availability zones, so can't add "
+                                     "host to aggregate. \nAggregates list: "
+                                     "%s" % aggregates)
+        return hosts[0]
 
     def _add_host(self, aggregate_id, host):
         aggregate = (self.aggregates_client.add_host(aggregate_id, host=host)
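
The reworked _get_host_name() above picks a compute host that is up, enabled, and not yet part of an aggregate that defines an availability zone. A standalone sketch of that selection over sample data shaped like the relevant fields of the services and aggregates responses (the hostnames and zones below are illustrative only):

# Sample data shaped like list_services()['services'] and
# list_aggregates()['aggregates']; the values are made up.
services = [
    {'host': 'compute-1', 'state': 'up', 'status': 'enabled'},
    {'host': 'compute-2', 'state': 'down', 'status': 'enabled'},
    {'host': 'compute-3', 'state': 'up', 'status': 'enabled'},
]
aggregates = [
    {'availability_zone': 'az1', 'hosts': ['compute-1']},
    {'availability_zone': None, 'hosts': ['compute-3']},
]

hosts_available = [s['host'] for s in services
                   if s['state'] == 'up' and s['status'] == 'enabled']
hosts_in_zone = []
for agg in aggregates:
    if agg['availability_zone']:
        hosts_in_zone.extend(agg['hosts'])

candidates = [h for h in hosts_available if h not in hosts_in_zone]
print(candidates[0] if candidates else 'no usable host')  # compute-3
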
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
new file mode 100644
index 0000000..b1098fa
--- /dev/null
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -0,0 +1,141 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import html.parser
+import ssl
+from urllib import parse
+from urllib import request
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib import decorators
+from tempest import test
+
+CONF = config.CONF
+
+
+class HorizonHTMLParser(html.parser.HTMLParser):
+    csrf_token = None
+    region = None
+    login = None
+
+    def _find_name(self, attrs, name):
+        for attrpair in attrs:
+            if attrpair[0] == 'name' and attrpair[1] == name:
+                return True
+        return False
+
+    def _find_value(self, attrs):
+        for attrpair in attrs:
+            if attrpair[0] == 'value':
+                return attrpair[1]
+        return None
+
+    def _find_attr_value(self, attrs, attr_name):
+        for attrpair in attrs:
+            if attrpair[0] == attr_name:
+                return attrpair[1]
+        return None
+
+    def handle_starttag(self, tag, attrs):
+        if tag == 'input':
+            if self._find_name(attrs, 'csrfmiddlewaretoken'):
+                self.csrf_token = self._find_value(attrs)
+            if self._find_name(attrs, 'region'):
+                self.region = self._find_value(attrs)
+        if tag == 'form':
+            self.login = self._find_attr_value(attrs, 'action')
+
+
+class TestDashboardBasicOps(test.BaseTestCase):
+
+    """The test suite for dashboard basic operations
+
+    This is a basic scenario test:
+    * checks that the login page is available
+    * logs in as a regular user
+    * checks that the user home page loads without error
+    """
+    opener = None
+
+    credentials = ['primary']
+
+    @classmethod
+    def skip_checks(cls):
+        super(TestDashboardBasicOps, cls).skip_checks()
+        if not CONF.service_available.horizon:
+            raise cls.skipException("Horizon support is required")
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.set_network_resources()
+        super(TestDashboardBasicOps, cls).setup_credentials()
+
+    def check_login_page(self):
+        response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+        self.assertIn("id_username", response.decode("utf-8"))
+
+    def user_login(self, username, password):
+        response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+
+        # Grab the CSRF token and default region
+        parser = HorizonHTMLParser()
+        parser.feed(response.decode("utf-8"))
+
+        # Construct the login URL for the dashboard; discovery accommodates
+        # a non-/ web root for the dashboard
+        login_url = parse.urljoin(CONF.dashboard.dashboard_url, parser.login)
+
+        # Prepare login form request
+        req = request.Request(login_url)
+        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+        req.add_header('Referer', CONF.dashboard.dashboard_url)
+
+        # Pass the default domain name regardless of the auth version in
+        # order to test the scenario where horizon runs with keystone v3
+        params = {'username': username,
+                  'password': password,
+                  'region': parser.region,
+                  'domain': CONF.auth.default_credentials_domain_name,
+                  'csrfmiddlewaretoken': parser.csrf_token}
+        self._get_opener().open(req, parse.urlencode(params).encode())
+
+    def check_home_page(self):
+        response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+        self.assertIn('Overview', response.decode("utf-8"))
+
+    def _get_opener(self):
+        if not self.opener:
+            if (CONF.dashboard.disable_ssl_certificate_validation and
+                    self._ssl_default_context_supported()):
+                ctx = ssl.create_default_context()
+                ctx.check_hostname = False
+                ctx.verify_mode = ssl.CERT_NONE
+                self.opener = request.build_opener(
+                    request.HTTPSHandler(context=ctx),
+                    request.HTTPCookieProcessor())
+            else:
+                self.opener = request.build_opener(
+                    request.HTTPCookieProcessor())
+        return self.opener
+
+    def _ssl_default_context_supported(self):
+        return hasattr(ssl, 'create_default_context')
+
+    @decorators.attr(type='smoke')
+    @decorators.idempotent_id('4f8851b1-0e69-482b-b63b-84c6e76f6c80')
+    @utils.services('dashboard')
+    def test_basic_scenario(self):
+        creds = self.os_primary.credentials
+        self.check_login_page()
+        self.user_login(creds.username, creds.password)
+        self.check_home_page()
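
HorizonHTMLParser in the new test above pulls the CSRF token, default region and form action out of the Horizon login page before posting the login form. A self-contained sketch of the same extraction against a made-up login form (the HTML below is illustrative, not a real Horizon response):

import html.parser

SAMPLE_LOGIN_PAGE = """
<form action="/auth/login/" method="post">
  <input type="hidden" name="csrfmiddlewaretoken" value="abc123">
  <input type="hidden" name="region" value="RegionOne">
  <input type="text" id="id_username" name="username">
</form>
"""

class LoginFormParser(html.parser.HTMLParser):
    """Minimal parser mirroring the HorizonHTMLParser logic above."""
    csrf_token = None
    region = None
    login = None

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == 'input' and attrs.get('name') == 'csrfmiddlewaretoken':
            self.csrf_token = attrs.get('value')
        if tag == 'input' and attrs.get('name') == 'region':
            self.region = attrs.get('value')
        if tag == 'form':
            self.login = attrs.get('action')

parser = LoginFormParser()
parser.feed(SAMPLE_LOGIN_PAGE)
print(parser.csrf_token, parser.region, parser.login)
# abc123 RegionOne /auth/login/
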
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 008d1ae..6ee9f28 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -30,8 +30,7 @@
     For both LUKS and cryptsetup encryption types, this test performs
     the following:
 
-    * Creates an image in Glance
-    * Boots an instance from the image
+    * Boots an instance from an image (CONF.compute.image_ref)
     * Creates an encryption type (as admin)
     * Creates a volume of that encryption type (as a regular user)
     * Attaches and detaches the encrypted volume to the instance
@@ -44,10 +43,9 @@
             raise cls.skipException('Encrypted volume attach is not supported')
 
     def launch_instance(self):
-        image = self.glance_image_create()
         keypair = self.create_keypair()
 
-        return self.create_server(image_id=image, key_name=keypair['name'])
+        return self.create_server(key_name=keypair['name'])
 
     def attach_detach_volume(self, server, volume):
         attached_volume = self.nova_volume_attach(server, volume)
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
deleted file mode 100644
index e7085f6..0000000
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright (c) 2019 Ericsson
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from tempest.common import utils
-from tempest.common import waiters
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
-from tempest.lib import decorators
-from tempest.scenario import manager
-
-
-LOG = logging.getLogger(__name__)
-CONF = config.CONF
-
-
-class MinBwAllocationPlacementTest(manager.NetworkScenarioTest):
-    credentials = ['primary', 'admin']
-    required_extensions = ['port-resource-request',
-                           'qos',
-                           'qos-bw-minimum-ingress']
-    # The feature QoS minimum bandwidth allocation in Placement API depends on
-    # Granular resource requests to GET /allocation_candidates and Support
-    # allocation candidates with nested resource providers features in
-    # Placement (see: https://specs.openstack.org/openstack/nova-specs/specs/
-    # stein/approved/bandwidth-resource-provider.html#rest-api-impact) and this
-    # means that the minimum placement microversion is 1.29
-    placement_min_microversion = '1.29'
-    placement_max_microversion = 'latest'
-
-    # Nova rejects to boot VM with port which has resource_request field, below
-    # microversion 2.72
-    compute_min_microversion = '2.72'
-    compute_max_microversion = 'latest'
-
-    INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
-    INGRESS_DIRECTION = 'ingress'
-
-    SMALLEST_POSSIBLE_BW = 1
-    # For any realistic inventory value (that is inventory != MAX_INT) an
-    # allocation candidate request of MAX_INT is expected to be rejected, see:
-    # https://github.com/openstack/placement/blob/master/placement/
-    # db/constants.py#L16
-    PLACEMENT_MAX_INT = 0x7FFFFFFF
-
-    @classmethod
-    def setup_clients(cls):
-        super(MinBwAllocationPlacementTest, cls).setup_clients()
-        cls.placement_client = cls.os_admin.placement_client
-        cls.networks_client = cls.os_admin.networks_client
-        cls.subnets_client = cls.os_admin.subnets_client
-        cls.routers_client = cls.os_adm.routers_client
-        cls.qos_client = cls.os_admin.qos_client
-        cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
-
-    @classmethod
-    def skip_checks(cls):
-        super(MinBwAllocationPlacementTest, cls).skip_checks()
-        if not CONF.network_feature_enabled.qos_placement_physnet:
-            msg = "Skipped as no physnet is available in config for " \
-                  "placement based QoS allocation."
-            raise cls.skipException(msg)
-
-    def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
-        policy = self.qos_client.create_qos_policy(
-            name=data_utils.rand_name(name_prefix),
-            shared=True)['policy']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.qos_client.delete_qos_policy, policy['id'])
-        rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
-            policy['id'],
-            **{
-                'min_kbps': min_kbps,
-                'direction': self.INGRESS_DIRECTION
-            })['minimum_bandwidth_rule']
-        self.addCleanup(
-            test_utils.call_and_ignore_notfound_exc,
-            self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'],
-            rule['id'])
-
-        return policy
-
-    def _create_qos_policies(self):
-        self.qos_policy_valid = self._create_policy_and_min_bw_rule(
-            name_prefix='test_policy_valid',
-            min_kbps=self.SMALLEST_POSSIBLE_BW)
-        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
-            name_prefix='test_policy_not_valid',
-            min_kbps=self.PLACEMENT_MAX_INT)
-
-    def _create_network_and_qos_policies(self):
-        physnet_name = CONF.network_feature_enabled.qos_placement_physnet
-        base_segm = \
-            CONF.network_feature_enabled.provider_net_base_segmentation_id
-
-        self.prov_network, _, _ = self.create_networks(
-            networks_client=self.networks_client,
-            routers_client=self.routers_client,
-            subnets_client=self.subnets_client,
-            **{
-                'shared': True,
-                'provider:network_type': 'vlan',
-                'provider:physical_network': physnet_name,
-                'provider:segmentation_id': base_segm
-            })
-
-        self._create_qos_policies()
-
-    def _check_if_allocation_is_possible(self):
-        alloc_candidates = self.placement_client.list_allocation_candidates(
-            resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
-                                  self.SMALLEST_POSSIBLE_BW))
-        if len(alloc_candidates['provider_summaries']) == 0:
-            self.fail('No allocation candidates are available for %s:%s' %
-                      (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
-
-        # Just to be sure check with impossible high (placement max_int),
-        # allocation
-        alloc_candidates = self.placement_client.list_allocation_candidates(
-            resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
-                                  self.PLACEMENT_MAX_INT))
-        if len(alloc_candidates['provider_summaries']) != 0:
-            self.fail('For %s:%s there should be no available candidate!' %
-                      (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
-
-    @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
-    @decorators.attr(type='slow')
-    @utils.services('compute', 'network')
-    def test_qos_min_bw_allocation_basic(self):
-        """"Basic scenario with QoS min bw allocation in placement.
-
-        Steps:
-        * Create prerequisites:
-        ** VLAN type provider network with subnet.
-        ** valid QoS policy with minimum bandwidth rule with min_kbps=1
-        (This is a simplification to skip the checks in placement for
-        detecting the resource provider tree and inventories, as if
-        bandwidth resource is available 1 kbs will be available).
-        ** invalid QoS policy with minimum bandwidth rule with
-        min_kbs=max integer from placement (this is a simplification again
-        to avoid detection of RP tress and inventories, as placement will
-        reject such big allocation).
-        * Create port with valid QoS policy, and boot VM with that, it should
-        pass.
-        * Create port with invalid QoS policy, and try to boot VM with that,
-        it should fail.
-        """
-
-        self._check_if_allocation_is_possible()
-
-        self._create_network_and_qos_policies()
-
-        valid_port = self.create_port(
-            self.prov_network['id'], qos_policy_id=self.qos_policy_valid['id'])
-
-        server1 = self.create_server(
-            networks=[{'port': valid_port['id']}])
-        allocations = self.placement_client.list_allocations(server1['id'])
-
-        self.assertGreater(len(allocations['allocations']), 0)
-        bw_resource_in_alloc = False
-        for rp, resources in allocations['allocations'].items():
-            if self.INGRESS_RESOURCE_CLASS in resources['resources']:
-                bw_resource_in_alloc = True
-        self.assertTrue(bw_resource_in_alloc)
-
-        # boot another vm with max int bandwidth
-        not_valid_port = self.create_port(
-            self.prov_network['id'],
-            qos_policy_id=self.qos_policy_not_valid['id'])
-        server2 = self.create_server(
-            wait_until=None,
-            networks=[{'port': not_valid_port['id']}])
-        waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server2['id'],
-            status='ERROR', ready_wait=False, raise_on_error=False)
-        allocations = self.placement_client.list_allocations(server2['id'])
-
-        self.assertEqual(0, len(allocations['allocations']))
-        server2 = self.servers_client.show_server(server2['id'])
-        self.assertIn('fault', server2['server'])
-        self.assertIn('No valid host', server2['server']['fault']['message'])
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 4cd860d..5aac19c 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -78,7 +78,7 @@
         self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
 
     def create_and_add_security_group_to_server(self, server):
-        secgroup = self._create_security_group()
+        secgroup = self.create_security_group()
         self.servers_client.add_security_group(server['id'],
                                                name=secgroup['name'])
         self.addCleanup(self.servers_client.remove_security_group,
@@ -96,17 +96,10 @@
                    '%s' % (secgroup['id'], server['id']))
             raise exceptions.TimeoutException(msg)
 
-    def _get_floating_ip_in_server_addresses(self, floating_ip, server):
-        for addresses in server['addresses'].values():
-            for address in addresses:
-                if (address['OS-EXT-IPS:type'] == 'floating' and
-                        address['addr'] == floating_ip['ip']):
-                    return address
-
     @decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
     @utils.services('compute', 'volume', 'image', 'network')
     def test_minimum_basic_scenario(self):
-        image = self.glance_image_create()
+        image = self.image_create()
         keypair = self.create_keypair()
 
         server = self.create_server(image_id=image, key_name=keypair['name'])
@@ -129,17 +122,12 @@
         server = self.servers_client.show_server(server['id'])['server']
         if (CONF.network_feature_enabled.floating_ips and
             CONF.network.floating_network_name):
-            floating_ip = self.create_floating_ip(server)
-            # fetch the server again to make sure the addresses were refreshed
-            # after associating the floating IP
-            server = self.servers_client.show_server(server['id'])['server']
-            address = self._get_floating_ip_in_server_addresses(
-                floating_ip, server)
-            self.assertIsNotNone(
-                address,
-                "Failed to find floating IP '%s' in server addresses: %s" %
-                (floating_ip['ip'], server['addresses']))
-            ssh_ip = floating_ip['ip']
+            fip = self.create_floating_ip(server)
+            floating_ip = self.associate_floating_ip(
+                fip, server)
+            waiters.wait_for_server_floating_ip(self.servers_client,
+                                                server, floating_ip)
+            ssh_ip = floating_ip['floating_ip_address']
         else:
             ssh_ip = self.get_server_ip(server)
 
@@ -162,20 +150,7 @@
 
         if floating_ip:
             # delete the floating IP, this should refresh the server addresses
-            self.compute_floating_ips_client.delete_floating_ip(
-                floating_ip['id'])
-
-            def is_floating_ip_detached_from_server():
-                server_info = self.servers_client.show_server(
-                    server['id'])['server']
-                address = self._get_floating_ip_in_server_addresses(
-                    floating_ip, server_info)
-                return (not address)
-
-            if not test_utils.call_until_true(
-                is_floating_ip_detached_from_server,
-                CONF.compute.build_timeout,
-                CONF.compute.build_interval):
-                msg = ("Floating IP '%s' should not be in server addresses: %s"
-                       % (floating_ip['ip'], server['addresses']))
-                raise exceptions.TimeoutException(msg)
+            self.disassociate_floating_ip(floating_ip)
+            waiters.wait_for_server_floating_ip(
+                self.servers_client, server, floating_ip,
+                wait_for_disassociate=True)
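
The updated flow above replaces the hand-rolled call_until_true() polling with waiters.wait_for_server_floating_ip(). A minimal standalone sketch of that waiter pattern, with a hypothetical show_addresses() callable standing in for the servers client and a made-up address payload:

import time

def wait_for_floating_ip(show_addresses, ip, timeout=10, interval=1,
                         wait_for_disassociate=False):
    """Poll show_addresses() until ip (dis)appears or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        present = any(addr.get('OS-EXT-IPS:type') == 'floating' and
                      addr.get('addr') == ip
                      for addrs in show_addresses().values()
                      for addr in addrs)
        if present != wait_for_disassociate:
            return True
        time.sleep(interval)
    raise TimeoutError('floating IP %s did not reach the expected state' % ip)

# Hypothetical payload shaped like server['addresses'].
fake_addresses = {'private': [
    {'addr': '203.0.113.5', 'OS-EXT-IPS:type': 'floating'}]}
print(wait_for_floating_ip(lambda: fake_addresses, '203.0.113.5'))  # True
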
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index b1919d4..b48ac3c 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -60,9 +60,9 @@
     def _setup_server(self, keypair):
         security_groups = []
         if utils.is_extension_enabled('security-group', 'network'):
-            security_group = self._create_security_group()
+            security_group = self.create_security_group()
             security_groups = [{'name': security_group['name']}]
-        network, _, _ = self.create_networks()
+        network, _, _ = self.setup_network_subnet_with_router()
         server = self.create_server(
             networks=[{'uuid': network['id']}],
             key_name=keypair['name'],
@@ -80,8 +80,8 @@
         return floating_ip
 
     def _check_network_connectivity(self, server, keypair, floating_ip,
-                                    should_connect=True):
-        username = CONF.validation.image_ssh_user
+                                    should_connect=True,
+                                    username=CONF.validation.image_ssh_user):
         private_key = keypair['private_key']
         self.check_tenant_network_connectivity(
             server, username, private_key,
@@ -95,12 +95,13 @@
                                    'Public network connectivity check failed',
                                    server)
 
-    def _wait_server_status_and_check_network_connectivity(self, server,
-                                                           keypair,
-                                                           floating_ip):
+    def _wait_server_status_and_check_network_connectivity(
+        self, server, keypair, floating_ip,
+        username=CONF.validation.image_ssh_user):
         waiters.wait_for_server_status(self.servers_client, server['id'],
                                        'ACTIVE')
-        self._check_network_connectivity(server, keypair, floating_ip)
+        self._check_network_connectivity(server, keypair, floating_ip,
+                                         username=username)
 
     @decorators.idempotent_id('61f1aa9a-1573-410e-9054-afa557cab021')
     @decorators.attr(type='slow')
@@ -137,10 +138,11 @@
         server = self._setup_server(keypair)
         floating_ip = self._setup_network(server, keypair)
         image_ref_alt = CONF.compute.image_ref_alt
+        username_alt = CONF.validation.image_alt_ssh_user
         self.servers_client.rebuild_server(server['id'],
                                            image_ref=image_ref_alt)
         self._wait_server_status_and_check_network_connectivity(
-            server, keypair, floating_ip)
+            server, keypair, floating_ip, username_alt)
 
     @decorators.idempotent_id('2b2642db-6568-4b35-b812-eceed3fa20ce')
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
@@ -262,7 +264,7 @@
         self._wait_server_status_and_check_network_connectivity(
             server, keypair, floating_ip)
 
-    @decorators.skip_because(bug='1836595')
+    @decorators.unstable_test(bug='1836595')
     @decorators.idempotent_id('25b188d7-0183-4b1e-a11d-15840c8e2fd6')
     @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
                           'Cold migration is not available.')
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index f46c7e8..cbe8c20 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -106,7 +106,8 @@
 
     def _setup_network_and_servers(self, **kwargs):
         boot_with_port = kwargs.pop('boot_with_port', False)
-        self.network, self.subnet, self.router = self.create_networks(**kwargs)
+        self.network, self.subnet, self.router = (
+            self.setup_network_subnet_with_router(**kwargs))
         self.check_networks()
 
         self.ports = []
@@ -159,7 +160,7 @@
         keypair = self.create_keypair()
         self.keypairs[keypair['name']] = keypair
         security_groups = [
-            {'name': self._create_security_group()['name']}
+            {'name': self.create_security_group()['name']}
         ]
         network = {'uuid': network['id']}
         if port_id is not None:
@@ -223,14 +224,14 @@
         floating_ip, server = self.floating_ip_tuple
         # create a new server for the floating ip
         server = self._create_server(self.network)
-        port_id, _ = self._get_server_port_id_and_ip4(server)
+        port_id, _ = self.get_server_port_id_and_ip4(server)
         floating_ip = self.floating_ips_client.update_floatingip(
             floating_ip['id'], port_id=port_id)['floatingip']
         self.assertEqual(port_id, floating_ip['port_id'])
         self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
 
     def _create_new_network(self, create_gateway=False):
-        self.new_net = self._create_network()
+        self.new_net = self.create_network()
         if create_gateway:
             self.new_subnet = self.create_subnet(
                 network=self.new_net)
@@ -297,9 +298,19 @@
         ip_mask = CONF.network.project_network_mask_bits
         # check if the address is not already in use, if not, set it
         if ' ' + ip_address + '/' + str(ip_mask) not in ip_output:
-            ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
-                                    ip_address, ip_mask, new_nic))
-            ssh_client.exec_command("sudo ip link set %s up" % new_nic)
+            try:
+                ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
+                                        ip_address, ip_mask, new_nic))
+                ssh_client.exec_command("sudo ip link set %s up" % new_nic)
+            except exceptions.SSHExecCommandFailed as exc:
+                if 'RTNETLINK answers: File exists' in str(exc):
+                    LOG.debug(
+                        'IP address %(ip_address)s is already set in device '
+                        '%(device)s\nPrevious "ip a" output: %(ip_output)s',
+                        {'ip_address': ip_address, 'device': new_nic,
+                         'ip_output': ip_output})
+                else:
+                    raise exc
 
     def _get_server_nics(self, ssh_client):
         reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+)[@]?.*:')
@@ -318,13 +329,16 @@
         floating_ip, server = self.floating_ip_tuple
         # get internal ports' ips:
         # get all network and compute ports in the new network
+        # NOTE(ralonsoh): device_owner="network:distributed" ports are OVN
+        # metadata ports and should be filtered out.
         internal_ips = (
             p['fixed_ips'][0]['ip_address'] for p in
             self.os_admin.ports_client.list_ports(
-                tenant_id=server['tenant_id'],
+                project_id=server['tenant_id'],
                 network_id=network['id'])['ports']
-            if p['device_owner'].startswith('network') or
-            p['device_owner'].startswith('compute')
+            if ((p['device_owner'].startswith('network') and
+                 not p['device_owner'] == 'network:distributed') or
+                p['device_owner'].startswith('compute'))
         )
 
         self._check_server_connectivity(floating_ip,
@@ -346,10 +360,19 @@
                 network_id=CONF.network.public_network_id)['subnets']
             if s['ip_version'] == 4
         ]
-        self.assertEqual(1, len(v4_subnets),
-                         "Found %d IPv4 subnets" % len(v4_subnets))
 
-        external_ips = [v4_subnets[0]['gateway_ip']]
+        if len(v4_subnets) > 1:
+            self.assertTrue(
+                CONF.network.subnet_id,
+                "Found %d subnets. Specify subnet using configuration "
+                "option [network].subnet_id."
+                % len(v4_subnets))
+            subnet = self.os_admin.subnets_client.show_subnet(
+                CONF.network.subnet_id)['subnet']
+            external_ips = [subnet['gateway_ip']]
+        else:
+            external_ips = [v4_subnets[0]['gateway_ip']]
+
         self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
                                         external_ips)
 
@@ -604,6 +627,13 @@
         ssh_client = self.get_remote_client(
             ip_address, private_key=private_key, server=server)
 
+        # NOTE: Server needs to renew its dhcp lease in order to get new
+        # definitions from subnet
+        # NOTE(amuller): we are renewing the lease as part of the retry
+        # because Neutron updates dnsmasq asynchronously after the
+        # subnet-update API call returns.
+        ssh_client.renew_lease(fixed_ip=floating_ip['fixed_ip_address'],
+                               dhcp_client=CONF.scenario.dhcp_client)
         dns_servers = [initial_dns_server]
         servers = ssh_client.get_dns_servers()
         self.assertEqual(set(dns_servers), set(servers),
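
The port filter above now drops OVN metadata ports (device_owner 'network:distributed') while keeping the other network-owned and compute-owned ports. A standalone sketch of that predicate over a sample port list (records shaped like list_ports()['ports']; the addresses are made up):

# Sample port records; only the fields used by the filter are included.
ports = [
    {'device_owner': 'network:dhcp',
     'fixed_ips': [{'ip_address': '10.0.0.2'}]},
    {'device_owner': 'network:distributed',   # OVN metadata port
     'fixed_ips': [{'ip_address': '10.0.0.3'}]},
    {'device_owner': 'compute:nova',
     'fixed_ips': [{'ip_address': '10.0.0.10'}]},
]

internal_ips = [
    p['fixed_ips'][0]['ip_address'] for p in ports
    if ((p['device_owner'].startswith('network') and
         p['device_owner'] != 'network:distributed') or
        p['device_owner'].startswith('compute'))
]
print(internal_ips)  # ['10.0.0.2', '10.0.0.10']
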
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
new file mode 100644
index 0000000..db4751b
--- /dev/null
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -0,0 +1,510 @@
+# Copyright (c) 2019 Ericsson
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest.scenario import manager
+
+
+CONF = config.CONF
+
+
+class NetworkQoSPlacementTestBase(manager.NetworkScenarioTest):
+    """Base class for Network QoS testing
+
+    Base class for testing Network QoS scenarios involving placement
+    resource allocations.
+    """
+    credentials = ['primary', 'admin']
+    # The feature QoS minimum bandwidth allocation in Placement API depends on
+    # Granular resource requests to GET /allocation_candidates and Support
+    # allocation candidates with nested resource providers features in
+    # Placement (see: https://specs.openstack.org/openstack/nova-specs/specs/
+    # stein/approved/bandwidth-resource-provider.html#rest-api-impact) and this
+    # means that the minimum placement microversion is 1.29
+    placement_min_microversion = '1.29'
+    placement_max_microversion = 'latest'
+
+    # Nova refuses to boot a VM with a port that has a resource_request
+    # field below microversion 2.72
+    compute_min_microversion = '2.72'
+    compute_max_microversion = 'latest'
+
+    INGRESS_DIRECTION = 'ingress'
+    BW_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
+
+    # For any realistic inventory value (that is inventory != MAX_INT) an
+    # allocation candidate request of MAX_INT is expected to be rejected, see:
+    # https://github.com/openstack/placement/blob/master/placement/
+    # db/constants.py#L16
+    PLACEMENT_MAX_INT = 0x7FFFFFFF
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.placement_client = cls.os_admin.placement_client
+        cls.networks_client = cls.os_admin.networks_client
+        cls.subnets_client = cls.os_admin.subnets_client
+        cls.ports_client = cls.os_primary.ports_client
+        cls.routers_client = cls.os_adm.routers_client
+        cls.qos_client = cls.os_admin.qos_client
+        cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+        cls.flavors_client = cls.os_adm.flavors_client
+        cls.servers_client = cls.os_primary.servers_client
+
+    def _create_flavor_to_resize_to(self):
+        old_flavor = self.flavors_client.show_flavor(
+            CONF.compute.flavor_ref)['flavor']
+        new_flavor = self.flavors_client.create_flavor(**{
+            'ram': old_flavor['ram'],
+            'vcpus': old_flavor['vcpus'],
+            'name': old_flavor['name'] + 'extra',
+            'disk': old_flavor['disk'] + 1
+        })['flavor']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.flavors_client.delete_flavor, new_flavor['id'])
+        return new_flavor
+
+
+class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase):
+
+    required_extensions = ['port-resource-request',
+                           'qos',
+                           'qos-bw-minimum-ingress']
+
+    SMALLEST_POSSIBLE_BW = 1
+    BANDWIDTH_1 = 1000
+    BANDWIDTH_2 = 2000
+
+    @classmethod
+    def skip_checks(cls):
+        super(MinBwAllocationPlacementTest, cls).skip_checks()
+        if not CONF.network_feature_enabled.qos_placement_physnet:
+            msg = "Skipped as no physnet is available in config for " \
+                  "placement based QoS allocation."
+            raise cls.skipException(msg)
+
+    def setUp(self):
+        super(MinBwAllocationPlacementTest, self).setUp()
+        self._check_if_allocation_is_possible()
+
+    def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
+        policy = self.qos_client.create_qos_policy(
+            name=data_utils.rand_name(name_prefix),
+            shared=True)['policy']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.qos_client.delete_qos_policy, policy['id'])
+        rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
+            policy['id'],
+            **{
+                'min_kbps': min_kbps,
+                'direction': self.INGRESS_DIRECTION
+            })['minimum_bandwidth_rule']
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'],
+            rule['id'])
+
+        return policy
+
+    def _create_qos_basic_policies(self):
+        self.qos_policy_valid = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_valid',
+            min_kbps=self.SMALLEST_POSSIBLE_BW)
+        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_not_valid',
+            min_kbps=self.PLACEMENT_MAX_INT)
+
+    def _create_qos_policies_from_life(self):
+        # For tempest-slow the max bandwidth configured is 1000000,
+        # https://opendev.org/openstack/tempest/src/branch/master/
+        # .zuul.yaml#L416-L420
+        self.qos_policy_1 = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_1',
+            min_kbps=self.BANDWIDTH_1
+        )
+        self.qos_policy_2 = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_2',
+            min_kbps=self.BANDWIDTH_2
+        )
+
+    def _create_network_and_qos_policies(self, policy_method):
+        physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+        base_segm = \
+            CONF.network_feature_enabled.provider_net_base_segmentation_id
+
+        self.prov_network, _, _ = self.setup_network_subnet_with_router(
+            networks_client=self.networks_client,
+            routers_client=self.routers_client,
+            subnets_client=self.subnets_client,
+            **{
+                'shared': True,
+                'provider:network_type': 'vlan',
+                'provider:physical_network': physnet_name,
+                'provider:segmentation_id': base_segm
+            })
+
+        policy_method()
+
+    def _check_if_allocation_is_possible(self):
+        alloc_candidates = self.placement_client.list_allocation_candidates(
+            resources1='%s:%s' % (self.BW_RESOURCE_CLASS,
+                                  self.SMALLEST_POSSIBLE_BW))
+        if len(alloc_candidates['provider_summaries']) == 0:
+            self.fail('No allocation candidates are available for %s:%s' %
+                      (self.BW_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
+
+        # Just to be sure, check with an impossibly high allocation
+        # (placement max_int)
+        alloc_candidates = self.placement_client.list_allocation_candidates(
+            resources1='%s:%s' % (self.BW_RESOURCE_CLASS,
+                                  self.PLACEMENT_MAX_INT))
+        if len(alloc_candidates['provider_summaries']) != 0:
+            self.fail('For %s:%s there should be no available candidate!' %
+                      (self.BW_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+
+    def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
+        wait_until = (None if status == 'ERROR' else status)
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=qos_policy_id)
+
+        server = self.create_server(networks=[{'port': port['id']}],
+                                    wait_until=wait_until)
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status=status, ready_wait=False, raise_on_error=False)
+        return server, port
+
+    def _assert_allocation_is_as_expected(self, consumer, port_ids,
+                                          min_kbps=SMALLEST_POSSIBLE_BW):
+        allocations = self.placement_client.list_allocations(
+            consumer)['allocations']
+        self.assertGreater(len(allocations), 0)
+        bw_resource_in_alloc = False
+        allocation_rp = None
+        for rp, resources in allocations.items():
+            if self.BW_RESOURCE_CLASS in resources['resources']:
+                self.assertEqual(
+                    min_kbps,
+                    resources['resources'][self.BW_RESOURCE_CLASS])
+                bw_resource_in_alloc = True
+                allocation_rp = rp
+        if min_kbps:
+            self.assertTrue(bw_resource_in_alloc)
+
+            # Check that binding_profile of the port is not empty and
+            # matches the rp uuid
+            for port_id in port_ids:
+                port = self.os_admin.ports_client.show_port(port_id)
+                port_binding_alloc = port['port']['binding:profile'][
+                    'allocation']
+                # NOTE(gibi): the format of the allocation key depends on the
+                # existence of port-resource-request-groups API extension.
+                # TODO(gibi): drop the else branch once tempest does not need
+                # to support Xena release any more.
+                if utils.is_extension_enabled(
+                        'port-resource-request-groups', 'network'):
+                    self.assertEqual(
+                        {allocation_rp},
+                        set(port_binding_alloc.values()))
+                else:
+                    self.assertEqual(allocation_rp, port_binding_alloc)
+
+    @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_basic(self):
+        """"Basic scenario with QoS min bw allocation in placement.
+
+        Steps:
+        * Create prerequisites:
+        ** VLAN type provider network with subnet.
+        ** valid QoS policy with minimum bandwidth rule with min_kbps=1
+        (This is a simplification to skip the checks in placement for
+        detecting the resource provider tree and inventories, as if any
+        bandwidth resource is available, 1 kbps will be available).
+        ** invalid QoS policy with minimum bandwidth rule with
+        min_kbps=max integer from placement (this is a simplification again
+        to avoid detection of RP trees and inventories, as placement will
+        reject such a big allocation).
+        * Create port with valid QoS policy, and boot VM with that, it should
+        pass.
+        * Create port with invalid QoS policy, and try to boot VM with that,
+        it should fail.
+        """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server1, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server1['id'],
+                                               [valid_port['id']])
+
+        server2, not_valid_port = self._boot_vm_with_min_bw(
+            self.qos_policy_not_valid['id'], status='ERROR')
+        allocations = self.placement_client.list_allocations(server2['id'])
+
+        self.assertEqual(0, len(allocations['allocations']))
+        server2 = self.servers_client.show_server(server2['id'])
+        self.assertIn('fault', server2['server'])
+        self.assertIn('No valid host', server2['server']['fault']['message'])
+        # Check that binding_profile of the port is empty
+        port = self.os_admin.ports_client.show_port(not_valid_port['id'])
+        self.assertEqual(0, len(port['port']['binding:profile']))
+
+    @decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada')
+    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+                          'Cold migration is not available.')
+    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+                          'Less than 2 compute nodes, skipping multinode '
+                          'tests.')
+    @utils.services('compute', 'network')
+    def test_migrate_with_qos_min_bw_allocation(self):
+        """Scenario to migrate VM with QoS min bw allocation in placement
+
+        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+        checks, and
+        * migrate the server
+        * confirm the resize, if the VM state is VERIFY_RESIZE
+        * If the VM goes to ACTIVE state check that allocations are as
+        expected.
+        """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.os_adm.servers_client.migrate_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        # TODO(lajoskatona): Check that the allocations are ok for the
+        #  migration?
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.os_adm.servers_client.confirm_resize_server(
+            server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+    @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @utils.services('compute', 'network')
+    def test_resize_with_qos_min_bw_allocation(self):
+        """Scenario to resize VM with QoS min bw allocation in placement.
+
+        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+        checks, and
+        * resize the server with new flavor
+        * confirm the resize, if the VM state is VERIFY_RESIZE
+        * If the VM goes to ACTIVE state check that allocations are as
+        expected.
+        """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        new_flavor = self._create_flavor_to_resize_to()
+
+        self.servers_client.resize_server(
+            server_id=server['id'], flavor_ref=new_flavor['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        # TODO(lajoskatona): Check that the allocations are ok for the
+        #  migration?
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.servers_client.confirm_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+    @decorators.idempotent_id('79fdaa1c-df62-4738-a0f0-1cff9dc415f6')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy(self):
+        """Test the update of QoS policy on bound port
+
+        Related RFE in neutron: #1882804
+        The scenario is the following:
+        * Have a port with QoS policy and minimum bandwidth rule.
+        * Boot a VM with the port.
+        * Update the port with a new policy with different minimum bandwidth
+        values.
+        * The allocation on placement side should be according to the new
+        rules.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_2)
+
+        # Switch back to the original policy
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': self.qos_policy_1['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        # Updating to a policy that placement cannot allocate should fail
+        # and leave the original allocation untouched.
+        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_not_valid',
+            min_kbps=self.PLACEMENT_MAX_INT)
+        port_orig = self.ports_client.show_port(port['id'])['port']
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.ports_client.update_port,
+            port['id'], **{'qos_policy_id': self.qos_policy_not_valid['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        port_upd = self.ports_client.show_port(port['id'])['port']
+        self.assertEqual(port_orig['qos_policy_id'],
+                         port_upd['qos_policy_id'])
+        self.assertEqual(self.qos_policy_1['id'], port_upd['qos_policy_id'])
+
+    @decorators.idempotent_id('9cfc3bb8-f433-4c91-87b6-747cadc8958a')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_from_zero(self):
+        """Test port without QoS policy to have QoS policy
+
+        This scenario checks if updating a port without QoS policy to
+        have QoS policy with minimum_bandwidth rule succeeds only on
+        controlplane, but placement allocation remains 0.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(self.prov_network['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+        self.ports_client.update_port(
+            port['id'], **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+    @decorators.idempotent_id('a9725a70-1d28-4e3b-ae0e-450abc235962')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_to_zero(self):
+        """Test port with QoS policy to remove QoS policy
+
+        In this scenario port with QoS minimum_bandwidth rule update to
+        remove QoS policy results in 0 placement allocation.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': None})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+    @decorators.idempotent_id('756ced7f-6f1a-43e7-a851-2fcfc16f3dd7')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_with_multiple_ports(self):
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port1 = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+        port2 = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_2['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port1['id']}, {'port': port2['id']}])
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port1['id'], port2['id']],
+            self.BANDWIDTH_1 + self.BANDWIDTH_2)
+
+        self.ports_client.update_port(
+            port1['id'],
+            **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port1['id'], port2['id']],
+            2 * self.BANDWIDTH_2)
+
+    @decorators.idempotent_id('0805779e-e03c-44fb-900f-ce97a790653b')
+    @utils.services('compute', 'network')
+    def test_empty_update(self):
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+        self.ports_client.update_port(
+            port['id'],
+            **{'description': 'foo'})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
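
The placement checks in the scenarios above build on the allocations returned by
placement_client.list_allocations(), which the tests also inspect directly when a
boot is expected to fail. As a hedged, self-contained sketch of the kind of
bookkeeping _assert_allocation_is_as_expected presumably performs (that helper is
defined outside this hunk; the NET_BW_* resource class names and the sample
payload below are assumptions based on the placement API, not taken from this
patch):

# Hedged illustration: sum the bandwidth resources from a placement
# allocations reply. The payload shape mirrors what list_allocations()
# returns in the tests above; UUID keys and values are made up.

def total_bandwidth_allocation(allocations_body, rc_prefix='NET_BW_'):
    total = 0
    for alloc in allocations_body.get('allocations', {}).values():
        for resource_class, amount in alloc.get('resources', {}).items():
            if resource_class.startswith(rc_prefix):
                total += amount
    return total


sample = {
    'allocations': {
        'rp-uuid-of-the-physical-network-agent': {
            'resources': {'NET_BW_EGR_KILOBIT_PER_SEC': 1000,
                          'NET_BW_IGR_KILOBIT_PER_SEC': 1000},
        },
        'rp-uuid-of-the-compute-node': {
            'resources': {'VCPU': 1, 'MEMORY_MB': 512},
        },
    }
}

print(total_bandwidth_allocation(sample))  # 2000: only NET_BW_* is counted
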
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index 8de6614..4f5118b 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -66,7 +66,7 @@
     def setUp(self):
         super(TestGettingAddress, self).setUp()
         self.keypair = self.create_keypair()
-        self.sec_grp = self._create_security_group()
+        self.sec_grp = self.create_security_group()
 
     def prepare_network(self, address6_mode, n_subnets6=1, dualnet=False):
         """Prepare network
@@ -77,15 +77,15 @@
         if dualnet - create IPv6 subnets on a different network
         :return: list of created networks
         """
-        network = self._create_network()
+        network = self.create_network()
         if dualnet:
-            network_v6 = self._create_network()
+            network_v6 = self.create_network()
 
         sub4 = self.create_subnet(network=network,
                                   namestart='sub4',
                                   ip_version=4)
 
-        router = self._get_router()
+        router = self.get_router()
         self.routers_client.add_router_interface(router['id'],
                                                  subnet_id=sub4['id'])
 
@@ -130,7 +130,7 @@
             key_name=self.keypair['name'],
             security_groups=[{'name': self.sec_grp['name']}],
             networks=[{'uuid': n['id']} for n in networks])
-        fip = self.create_floating_ip(thing=srv)
+        fip = self.create_floating_ip(server=srv)
         ips = self.define_server_ips(srv=srv)
         ssh = self.get_remote_client(
             ip_address=fip['floating_ip_address'],
@@ -218,7 +218,7 @@
                     guest_has_address,
                     CONF.validation.ping_timeout, 1, ssh, ip)
                 if not result:
-                    self._log_console_output(servers=[srv])
+                    self.log_console_output(servers=[srv])
                     self.fail(
                         'Address %s not configured for instance %s, '
                         'ip address output is\n%s' %
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 9cbd831..aff7509 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -176,7 +176,7 @@
         cls.primary_tenant = cls.TenantProperties(cls.os_primary)
         cls.alt_tenant = cls.TenantProperties(cls.os_alt)
         for tenant in [cls.primary_tenant, cls.alt_tenant]:
-            cls.tenants[tenant.creds.tenant_id] = tenant
+            cls.tenants[tenant.creds.project_id] = tenant
 
         cls.floating_ip_access = not CONF.network.public_router_id
 
@@ -197,16 +197,16 @@
         tenant.keypair = keypair
 
     def _create_tenant_security_groups(self, tenant):
-        access_sg = self._create_empty_security_group(
+        access_sg = self.create_empty_security_group(
             namestart='secgroup_access-',
-            tenant_id=tenant.creds.tenant_id,
+            project_id=tenant.creds.project_id,
             client=tenant.manager.security_groups_client
         )
 
         # don't use default secgroup since it allows in-project traffic
-        def_sg = self._create_empty_security_group(
+        def_sg = self.create_empty_security_group(
             namestart='secgroup_general-',
-            tenant_id=tenant.creds.tenant_id,
+            project_id=tenant.creds.project_id,
             client=tenant.manager.security_groups_client
         )
         tenant.security_groups.update(access=access_sg, default=def_sg)
@@ -217,7 +217,7 @@
             direction='ingress',
         )
         sec_group_rules_client = tenant.manager.security_group_rules_client
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=access_sg,
             sec_group_rules_client=sec_group_rules_client,
             **ssh_rule)
@@ -326,7 +326,7 @@
         self.floating_ips.setdefault(server['id'], floating_ip)
 
     def _create_tenant_network(self, tenant, port_security_enabled=True):
-        network, subnet, router = self.create_networks(
+        network, subnet, router = self.setup_network_subnet_with_router(
             networks_client=tenant.manager.networks_client,
             routers_client=tenant.manager.routers_client,
             subnets_client=tenant.manager.subnets_client,
@@ -385,7 +385,7 @@
             remote_group_id=tenant.security_groups['default']['id'],
             direction='ingress'
         )
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=tenant.security_groups['default'],
             security_groups_client=tenant.manager.security_groups_client,
             **ruleset
@@ -413,7 +413,7 @@
         protocol = ruleset['protocol']
         sec_group_rules_client = (
             dest_tenant.manager.security_group_rules_client)
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=dest_tenant.security_groups['default'],
             sec_group_rules_client=sec_group_rules_client,
             **ruleset
@@ -429,7 +429,7 @@
         # allow reverse traffic and check
         sec_group_rules_client = (
             source_tenant.manager.security_group_rules_client)
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=source_tenant.security_groups['default'],
             sec_group_rules_client=sec_group_rules_client,
             **ruleset
@@ -464,9 +464,9 @@
     def _log_console_output_for_all_tenants(self):
         for tenant in self.tenants.values():
             client = tenant.manager.servers_client
-            self._log_console_output(servers=tenant.servers, client=client)
+            self.log_console_output(servers=tenant.servers, client=client)
             if tenant.access_point is not None:
-                self._log_console_output(
+                self.log_console_output(
                     servers=[tenant.access_point], client=client)
 
     def _create_protocol_ruleset(self, protocol, port=80):
@@ -534,16 +534,16 @@
         new_tenant = self.primary_tenant
 
         # Create empty security group and add icmp rule in it
-        new_sg = self._create_empty_security_group(
+        new_sg = self.create_empty_security_group(
             namestart='secgroup_new-',
-            tenant_id=new_tenant.creds.tenant_id,
+            project_id=new_tenant.creds.project_id,
             client=new_tenant.manager.security_groups_client)
         icmp_rule = dict(
             protocol='icmp',
             direction='ingress',
         )
         sec_group_rules_client = new_tenant.manager.security_group_rules_client
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=new_sg,
             sec_group_rules_client=sec_group_rules_client,
             **icmp_rule)
@@ -596,7 +596,7 @@
             protocol='icmp',
             direction='ingress'
         )
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=tenant.security_groups['default'],
             **ruleset
         )
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 8aa729b..990b325 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -37,7 +37,7 @@
 
     @classmethod
     def setup_credentials(cls):
-        cls.set_network_resources()
+        cls.set_network_resources(network=True, subnet=True)
         super(TestServerAdvancedOps, cls).setup_credentials()
 
     @decorators.attr(type='slow')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 02bc692..2a15470 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -52,7 +52,9 @@
             # Obtain a floating IP if floating_ips is enabled
             if (CONF.network_feature_enabled.floating_ips and
                 CONF.network.floating_network_name):
-                self.ip = self.create_floating_ip(self.instance)['ip']
+                fip = self.create_floating_ip(self.instance)
+                self.ip = self.associate_floating_ip(
+                    fip, self.instance)['floating_ip_address']
             else:
                 server = self.servers_client.show_server(
                     self.instance['id'])['server']
@@ -67,7 +69,10 @@
     def verify_metadata(self):
         if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
             # Verify metadata service
-            md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
+            if CONF.network.public_network_id:
+                md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
+            else:
+                md_url = 'http://169.254.169.254/latest/meta-data/local-ipv4'
 
             def exec_cmd_and_verify_output():
                 cmd = 'curl ' + md_url
@@ -125,7 +130,7 @@
     @utils.services('compute', 'network')
     def test_server_basic_ops(self):
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
         self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
         self.instance = self.create_server(
             key_name=keypair['name'],
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index d6b6d14..29612ec 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -33,9 +33,18 @@
      * shelve the instance
      * unshelve the instance
      * check the existence of the timestamp file in the unshelved instance
+     * check the existence of the timestamp file in the unshelved instance,
+       after a cold migration
 
     """
 
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(TestShelveInstance, cls).setup_clients()
+        cls.admin_servers_client = cls.os_admin.servers_client
+
     @classmethod
     def skip_checks(cls):
         super(TestShelveInstance, cls).skip_checks()
@@ -50,10 +59,24 @@
         waiters.wait_for_server_status(self.servers_client, server['id'],
                                        'ACTIVE')
 
-    def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False):
+    def _cold_migrate_server(self, server):
+        src_host = self.get_host_for_server(server['id'])
+
+        self.admin_servers_client.migrate_server(server['id'])
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'VERIFY_RESIZE')
+        self.servers_client.confirm_resize_server(server['id'])
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
+
+        dst_host = self.get_host_for_server(server['id'])
+        self.assertNotEqual(src_host, dst_host)
+
+    def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False,
+                                                cold_migrate=False):
         keypair = self.create_keypair()
 
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
         security_groups = [{'name': security_group['name']}]
 
         server = self.create_server(
@@ -71,6 +94,10 @@
         # with the instance snapshot
         self._shelve_then_unshelve_server(server)
 
+        if cold_migrate:
+            # Prevent bug #1732428 from coming back
+            self._cold_migrate_server(server)
+
         timestamp2 = self.get_timestamp(instance_ip,
                                         private_key=keypair['private_key'],
                                         server=server)
@@ -91,3 +118,18 @@
     @utils.services('compute', 'volume', 'network', 'image')
     def test_shelve_volume_backed_instance(self):
         self._create_server_then_shelve_and_unshelve(boot_from_volume=True)
+
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('1295fd9e-193a-4cf8-b211-55358e021bae')
+    @testtools.skipUnless(CONF.network.public_network_id,
+                          'The public_network_id option must be specified.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+                          'Cold migration not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.shelve_migrate,
+                          'Shelve migrate not available.')
+    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+                          'Less than 2 compute nodes, skipping multinode '
+                          'tests.')
+    @utils.services('compute', 'network', 'image')
+    def test_cold_migrate_unshelved_instance(self):
+        self._create_server_then_shelve_and_unshelve(cold_migrate=True)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index a33d4d4..d04cb9a 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -29,8 +29,10 @@
     The following is the scenario outline:
      * boot an instance and create a timestamp file in it
      * snapshot the instance
+     * add version metadata to the snapshot image
      * boot a second instance from the snapshot
      * check the existence of the timestamp file in the second instance
+     * snapshot the instance again
 
     """
 
@@ -48,7 +50,7 @@
     def test_snapshot_pattern(self):
         # prepare for booting an instance
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # boot an instance and create a timestamp file in it
         server = self.create_server(
@@ -63,6 +65,11 @@
         # snapshot the instance
         snapshot_image = self.create_server_snapshot(server=server)
 
+        # add version metadata to the snapshot image
+        self.image_client.update_image(
+            snapshot_image['id'], [dict(add='/version',
+                                        value='8.0')])
+
         # boot a second instance from the snapshot
         server_from_snapshot = self.create_server(
             image_id=snapshot_image['id'],
@@ -75,3 +82,6 @@
                                         private_key=keypair['private_key'],
                                         server=server_from_snapshot)
         self.assertEqual(timestamp, timestamp2)
+
+        # snapshot the instance again
+        self.create_server_snapshot(server=server_from_snapshot)
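
The snapshot test now attaches version metadata by handing update_image() a list
of JSON-patch style operations. A minimal, hedged illustration of what that
operation list looks like once serialized (only the serialization is shown here;
the media type and other request details of the Glance v2 patch call are not part
of this patch and are not asserted):

import json

# The operation list passed to update_image() in the test above.
patch = [dict(add='/version', value='8.0')]

# Serialized, this is the JSON-patch style body sent to the image API:
# [{"add": "/version", "value": "8.0"}]
print(json.dumps(patch))
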
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c3b3670..4b81b9e 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 import testtools
 
 from tempest.common import utils
@@ -24,7 +23,6 @@
 from tempest.scenario import manager
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class TestStampPattern(manager.ScenarioTest):
@@ -83,7 +81,7 @@
     def test_stamp_pattern(self):
         # prepare for booting an instance
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # boot an instance and create a timestamp file in it
         volume = self.create_volume()
diff --git a/tempest/scenario/test_unified_limits.py b/tempest/scenario/test_unified_limits.py
new file mode 100644
index 0000000..22256b4
--- /dev/null
+++ b/tempest/scenario/test_unified_limits.py
@@ -0,0 +1,435 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import io
+
+from oslo_utils import units
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+
+class ImageQuotaTest(manager.ScenarioTest):
+    credentials = ['primary', 'system_admin']
+
+    @classmethod
+    def resource_setup(cls):
+        super(ImageQuotaTest, cls).resource_setup()
+
+        # Figure out and record the glance service id
+        services = cls.os_system_admin.identity_services_v3_client.\
+            list_services()
+        glance_services = [x for x in services['services']
+                           if x['name'] == 'glance']
+        cls.glance_service_id = glance_services[0]['id']
+
+        # Pre-create all the quota limits and record their IDs so we can
+        # update them in-place without needing to know which ones have been
+        # created and in which order.
+        cls.limit_ids = {}
+
+        try:
+            cls.limit_ids['image_size_total'] = cls._create_limit(
+                'image_size_total', 10)
+            cls.limit_ids['image_stage_total'] = cls._create_limit(
+                'image_stage_total', 10)
+            cls.limit_ids['image_count_total'] = cls._create_limit(
+                'image_count_total', 10)
+            cls.limit_ids['image_count_uploading'] = cls._create_limit(
+                'image_count_uploading', 10)
+        except lib_exc.Forbidden:
+            # If we fail to set limits, it means they are not
+            # registered, and thus we will skip these tests once we
+            # have our os_system_admin client and run
+            # check_quotas_enabled().
+            pass
+
+    def setUp(self):
+        super(ImageQuotaTest, self).setUp()
+        self.created_images = []
+
+    def create_image(self, data=None, **kwargs):
+        """Wrapper that returns a test image."""
+
+        if 'name' not in kwargs:
+            name = data_utils.rand_name(
+                self.__class__.__name__ + "-image")
+            kwargs['name'] = name
+
+        params = dict(kwargs)
+        if data:
+            # NOTE: With the glance v1 API the data is passed in a
+            # header, so the data is handled separately here.
+            params['data'] = data
+
+        image = self.image_client.create_image(**params)
+        # Image objects returned by the v1 client have the image
+        # data inside a dict that is keyed against 'image'.
+        if 'image' in image:
+            image = image['image']
+        self.created_images.append(image['id'])
+        self.addCleanup(
+            self.image_client.wait_for_resource_deletion,
+            image['id'])
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.image_client.delete_image, image['id'])
+        return image
+
+    def check_quotas_enabled(self):
+        # Check to see if we should even be running these tests. Use
+        # the presence of a registered limit that we recognize as an
+        # indication.  This will be set up by the operator (or
+        # devstack) if glance is configured to use/honor the unified
+        # limits. If one is set, they must all be set, because glance
+        # has a single all-or-nothing flag for whether or not to use
+        # keystone limits. If anything, checking only one helps to
+        # assert the assumption that, if enabled, they must all be at
+        # least registered for proper operation.
+        registered_limits = self.os_system_admin.identity_limits_client.\
+            get_registered_limits()['registered_limits']
+        if 'image_count_total' not in [x['resource_name']
+                                       for x in registered_limits]:
+            raise self.skipException('Target system is not configured with '
+                                     'glance unified limits')
+
+    @classmethod
+    def _create_limit(cls, name, value):
+        return cls.os_system_admin.identity_limits_client.create_limit(
+            CONF.identity.region, cls.glance_service_id,
+            cls.image_client.tenant_id, name, value)['limits'][0]['id']
+
+    def _update_limit(self, name, value):
+        self.os_system_admin.identity_limits_client.update_limit(
+            self.limit_ids[name], value)
+
+    def _cleanup_images(self):
+        while self.created_images:
+            image_id = self.created_images.pop()
+            try:
+                self.image_client.delete_image(image_id)
+            except lib_exc.NotFound:
+                pass
+
+    @decorators.idempotent_id('9b74fe24-183b-41e6-bf42-84c2958a7be8')
+    @utils.services('image', 'identity')
+    def test_image_count_quota(self):
+        self.check_quotas_enabled()
+
+        # Set a quota on the number of images for our tenant to one.
+        self._update_limit('image_count_total', 1)
+
+        # Create one image
+        image = self.create_image(name='first',
+                                  container_format='bare',
+                                  disk_format='raw',
+                                  visibility='private')
+
+        # Second image would put us over quota, so expect failure.
+        self.assertRaises(lib_exc.OverLimit,
+                          self.create_image,
+                          name='second',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Update our limit to two.
+        self._update_limit('image_count_total', 2)
+
+        # Now the same create should succeed.
+        self.create_image(name='second',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Third image would put us over quota, so expect failure.
+        self.assertRaises(lib_exc.OverLimit,
+                          self.create_image,
+                          name='third',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Delete the first image to put us under quota.
+        self.image_client.delete_image(image['id'])
+
+        # Now the same create should succeed.
+        self.create_image(name='third',
+                          container_format='bare',
+                          disk_format='raw',
+                          visibility='private')
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('b103788b-5329-4aa9-8b0d-97f8733460db')
+    @utils.services('image', 'identity')
+    def test_image_count_uploading_quota(self):
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % __name__)
+            raise self.skipException(skip_msg)
+
+        self.check_quotas_enabled()
+
+        # Set a quota on the number of images we can have in uploading state.
+        self._update_limit('image_stage_total', 10)
+        self._update_limit('image_size_total', 10)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 1)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and stage an image
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Check that we can not stage another
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image2['id'], io.BytesIO(file_content))
+
+        # ... nor upload directly
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image3['id'],
+                          io.BytesIO(file_content))
+
+        # Update our quota to make room
+        self._update_limit('image_count_uploading', 2)
+
+        # Now our upload should work
+        self.image_client.store_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # ...and because that is no longer in uploading state, we should be
+        # able to stage our second image from above.
+        self.image_client.stage_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Finish our import of image2
+        self.image_client.image_import(image2['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image2['id'])
+
+        # Set our quota back to one
+        self._update_limit('image_count_uploading', 1)
+
+        # Since image1 is still staged, we should not be able to upload
+        # an image.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image4['id'],
+                          io.BytesIO(file_content))
+
+        # Finish our import of image1 to make space in our uploading quota.
+        self.image_client.image_import(image1['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image1['id'])
+
+        # Make sure that freed up the one upload quota to complete our upload
+        self.image_client.store_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('05e8d064-c39a-4801-8c6a-465df375ec5b')
+    @utils.services('image', 'identity')
+    def test_image_size_quota(self):
+        self.check_quotas_enabled()
+
+        # Set a quota on the image size for our tenant to 1MiB, and allow ten
+        # images.
+        self._update_limit('image_size_total', 1)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 10)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and upload a 1MiB image.
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.store_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and upload a second 1MiB image. This succeeds, but
+        # after completion, we are over quota. Despite us being at
+        # quota above, the initial quota check for the second
+        # operation has no idea what the image size will be, and thus
+        # uses delta=0. This will succeed because we're not
+        # technically over-quota and have not asked for any more (this
+        # is oslo.limit behavior). After the second operation,
+        # however, we will be over-quota regardless of the delta and
+        # subsequent attempts will fail. Because glance does not
+        # require an image size to be declared before upload, this is
+        # really the best it can do without an API change.
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.store_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to upload a third 1MiB image. This should fail to
+        # upload (but not create) because we are over quota.
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image3['id'], io.BytesIO(file_content))
+
+        # Increase our size quota to 2MiB.
+        self._update_limit('image_size_total', 2)
+
+        # Now the upload of the already-created image is allowed, but
+        # after completion, we are over quota again.
+        self.image_client.store_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to upload a fourth 1MiB image. This should
+        # fail to upload (but not create) because we are over quota.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.store_image_file,
+                          image4['id'], io.BytesIO(file_content))
+
+        # Delete our first image to make space in our existing 2MiB quota.
+        self.image_client.delete_image(image1['id'])
+
+        # Now the upload of the already-created image is allowed.
+        self.image_client.store_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
+
+    @decorators.idempotent_id('fc76b8d9-aae5-46fb-9285-099e37f311f7')
+    @utils.services('image', 'identity')
+    def test_image_stage_quota(self):
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % __name__)
+            raise self.skipException(skip_msg)
+
+        self.check_quotas_enabled()
+
+        # Create a staging quota of 1MiB, allow 10MiB of active
+        # images, and a total of ten images.
+        self._update_limit('image_stage_total', 1)
+        self._update_limit('image_size_total', 10)
+        self._update_limit('image_count_total', 10)
+        self._update_limit('image_count_uploading', 10)
+
+        file_content = data_utils.random_bytes(1 * units.Mi)
+
+        # Create and stage a 1MiB image.
+        image1 = self.create_image(name='first',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image1['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and stage a second 1MiB image. This succeeds, but
+        # after completion, we are over quota.
+        image2 = self.create_image(name='second',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.image_client.stage_image_file(image2['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to stage a third 1MiB image. This should fail to
+        # stage (but not create) because we are over quota.
+        image3 = self.create_image(name='third',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image3['id'], io.BytesIO(file_content))
+
+        # Make sure that even though we are over our stage quota, we
+        # can still create and upload an image the regular way.
+        image_upload = self.create_image(name='uploaded',
+                                         container_format='bare',
+                                         disk_format='raw',
+                                         visibility='private')
+        self.image_client.store_image_file(image_upload['id'],
+                                           io.BytesIO(file_content))
+
+        # Increase our stage quota to two MiB.
+        self._update_limit('image_stage_total', 2)
+
+        # Now the upload of the already-created image is allowed, but
+        # after completion, we are over quota again.
+        self.image_client.stage_image_file(image3['id'],
+                                           io.BytesIO(file_content))
+
+        # Create and attempt to stage a fourth 1MiB image. This should
+        # fail to stage (but not create) because we are over quota.
+        image4 = self.create_image(name='fourth',
+                                   container_format='bare',
+                                   disk_format='raw',
+                                   visibility='private')
+        self.assertRaises(lib_exc.OverLimit,
+                          self.image_client.stage_image_file,
+                          image4['id'], io.BytesIO(file_content))
+
+        # Finish our import of image1 to make space in our stage quota.
+        self.image_client.image_import(image1['id'], method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.image_client,
+                                                  image1['id'])
+
+        # Now the upload of the already-created image is allowed.
+        self.image_client.stage_image_file(image4['id'],
+                                           io.BytesIO(file_content))
+
+        # Delete all the images we created before the next test runs,
+        # so that it starts with full quota.
+        self._cleanup_images()
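
The size and stage quota tests above rely on the enforcement behaviour spelled
out in the inline comments: because glance cannot know the upload size in
advance, the quota check runs with delta=0, so a request passes whenever current
usage has not already exceeded the limit, and only later requests fail. A
minimal, self-contained illustration of that arithmetic (this is just the rule
the comments describe, not oslo.limit itself):

# Hedged sketch of the delta=0 check described in the comments above.

def over_quota(usage_mib, limit_mib, delta_mib=0):
    return usage_mib + delta_mib > limit_mib


limit = 1  # image_size_total quota of 1MiB

print(over_quota(usage_mib=0, limit_mib=limit))  # False: first 1MiB upload allowed
print(over_quota(usage_mib=1, limit_mib=limit))  # False: second upload still passes (delta=0)
print(over_quota(usage_mib=2, limit_mib=limit))  # True: now over quota, further uploads fail
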
diff --git a/tempest/scenario/test_volume_backup_restore.py b/tempest/scenario/test_volume_backup_restore.py
index 8a8c54e..d0885cf 100644
--- a/tempest/scenario/test_volume_backup_restore.py
+++ b/tempest/scenario/test_volume_backup_restore.py
@@ -70,7 +70,7 @@
 
         # Create keypair and security group
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # Boot a server from the restored backup
         bd_map_v2 = [{
@@ -84,11 +84,11 @@
                                     security_groups=[
                                         {'name': security_group['name']}])
 
-        # Create a floating ip
-        floating_ip = self.create_floating_ip(server)
-
+        # Create a floating ip and associate it to server.
+        fip = self.create_floating_ip(server)
+        floating_ip = self.associate_floating_ip(fip, server)
         # Check server connectivity
-        self.check_vm_connectivity(floating_ip['ip'],
+        self.check_vm_connectivity(floating_ip['floating_ip_address'],
                                    username=CONF.validation.image_ssh_user,
                                    private_key=keypair['private_key'],
                                    should_connect=True)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 0782389..5a5cc27 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -64,7 +64,7 @@
 
         LOG.info("Creating keypair and security group")
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # create an instance from volume
         LOG.info("Booting instance 1 from volume")
@@ -252,8 +252,7 @@
     @utils.services('compute', 'volume')
     def test_boot_server_from_encrypted_volume_luks(self):
         # Create an encrypted volume
-        volume = self.create_encrypted_volume('nova.volume.encryptors.'
-                                              'luks.LuksEncryptor',
+        volume = self.create_encrypted_volume('luks',
                                               volume_type='luks')
 
         self.volumes_client.set_bootable_volume(volume['id'], bootable=True)
diff --git a/tempest/scenario/test_volume_migrate_attached.py b/tempest/scenario/test_volume_migrate_attached.py
index 106500e..57d2a1a 100644
--- a/tempest/scenario/test_volume_migrate_attached.py
+++ b/tempest/scenario/test_volume_migrate_attached.py
@@ -100,7 +100,7 @@
     def test_volume_retype_attached(self):
         LOG.info("Creating keypair and security group")
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # create volume types
         LOG.info("Creating Volume types")
@@ -156,7 +156,7 @@
     def test_volume_migrate_attached(self):
         LOG.info("Creating keypair and security group")
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         LOG.info("Creating volume")
         # Create a unique volume type to avoid using the backend default
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 9fec548..0d7720e 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -15,9 +15,9 @@
 
 import re
 import time
+from urllib import parse as urllib
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest import exceptions
 from tempest.lib.common import rest_client
diff --git a/tempest/test.py b/tempest/test.py
index f383bc1..8ea3b16 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -20,7 +20,6 @@
 import debtcollector.moves
 import fixtures
 from oslo_log import log as logging
-import six
 import testtools
 
 from tempest import clients
@@ -38,12 +37,6 @@
 
 CONF = config.CONF
 
-# TODO(oomichi): This test.idempotent_id should be removed after all projects
-# switch to use decorators.idempotent_id.
-idempotent_id = debtcollector.moves.moved_function(
-    decorators.idempotent_id, 'idempotent_id', __name__,
-    version='Mitaka', removal_version='?')
-
 
 attr = debtcollector.moves.moved_function(
     decorators.attr, 'attr', __name__,
@@ -185,7 +178,7 @@
                      etype, cls.__name__)
             cls.tearDownClass()
             try:
-                six.reraise(etype, value, trace)
+                raise value.with_traceback(trace)
             finally:
                 del trace  # to avoid circular refs
         finally:
@@ -239,7 +232,7 @@
         # the first one
         if re_raise and etype is not None:
             try:
-                six.reraise(etype, value, trace)
+                raise value.with_traceback(trace)
             finally:
                 del trace  # to avoid circular refs
 
@@ -302,6 +295,7 @@
         identity_version = cls.get_identity_version()
         # setting force_tenant_isolation to True also needs admin credentials.
         if ('admin' in cls.credentials or
+                'alt_admin' in cls.credentials or
                 getattr(cls, 'force_tenant_isolation', False)):
             if not credentials.is_admin_available(
                     identity_version=identity_version):
@@ -389,7 +383,7 @@
             # This may raise an exception in case credentials are not available
             # In that case we want to let the exception through and the test
             # fail accordingly
-            if isinstance(credentials_type, six.string_types):
+            if isinstance(credentials_type, str):
                 manager = cls.get_client_manager(
                     credential_type=credentials_type)
                 setattr(cls, 'os_%s' % credentials_type, manager)
@@ -420,8 +414,18 @@
                             'alt_manager', 'os_alt', version='Pike',
                             removal_version='Queens')
             elif isinstance(credentials_type, list):
+                scope = 'project'
+                if credentials_type[0].startswith('system'):
+                    scope = 'system'
+                elif credentials_type[0].startswith('domain'):
+                    scope = 'domain'
                 manager = cls.get_client_manager(roles=credentials_type[1:],
-                                                 force_new=True)
+                                                 force_new=True,
+                                                 scope=scope)
+                setattr(cls, 'os_%s' % credentials_type[0], manager)
+                # TODO(gmann): Setting the old style attribute too for
+                # backward compatibility but at some point we should
+                # remove this.
                 setattr(cls, 'os_roles_%s' % credentials_type[0], manager)
 
     @classmethod
@@ -663,7 +667,7 @@
 
     @classmethod
     def get_client_manager(cls, credential_type=None, roles=None,
-                           force_new=None):
+                           force_new=None, scope=None):
         """Returns an OpenStack client manager
 
         Returns an OpenStack client manager based on either credential_type
@@ -671,6 +675,7 @@
         credential_type 'primary'
         :param credential_type: string - primary, alt or admin
         :param roles: list of roles
+        :param scope: scope for the test user
 
         :returns: the created client manager
         :raises skipException: if the requested credentials are not available
@@ -689,7 +694,7 @@
                         " is not able to provide credentials with the %s role "
                         "assigned." % (cls.__name__, role))
                     raise cls.skipException(skip_msg)
-            params = dict(roles=roles)
+            params = dict(roles=roles, scope=scope)
             if force_new is not None:
                 params.update(force_new=force_new)
             creds = cred_provider.get_creds_by_roles(**params)
@@ -853,10 +858,16 @@
         """
         # Get a manager for the given credentials_type, but at least
         # always fall back on getting the manager for primary credentials
-        if isinstance(credentials_type, six.string_types):
+        if isinstance(credentials_type, str):
             manager = cls.get_client_manager(credential_type=credentials_type)
         elif isinstance(credentials_type, list):
-            manager = cls.get_client_manager(roles=credentials_type[1:])
+            scope = 'project'
+            if credentials_type[0].startswith('system'):
+                scope = 'system'
+            elif credentials_type[0].startswith('domain'):
+                scope = 'domain'
+            manager = cls.get_client_manager(roles=credentials_type[1:],
+                                             scope=scope)
         else:
             manager = cls.get_client_manager()
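
The two hunks above derive a credential scope from the first element of a
list-style credentials entry. A small standalone sketch of that mapping (the
helper name and the example entries are illustrative only; tempest inlines this
logic as shown in the diff):

# Hedged sketch: list-style credentials pick their scope from the prefix of
# the first element, defaulting to project scope.

def scope_for(credentials_type):
    if credentials_type[0].startswith('system'):
        return 'system'
    if credentials_type[0].startswith('domain'):
        return 'domain'
    return 'project'


print(scope_for(['system_admin', 'admin']))    # system
print(scope_for(['domain_member', 'member']))  # domain
print(scope_for(['my_member', 'member']))      # project
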
 
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 7a037eb..1d69d9d 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -15,7 +15,6 @@
 import abc
 
 from oslo_log import log as logging
-import six
 import stevedore
 
 from tempest.lib.common.utils import misc
@@ -24,8 +23,7 @@
 LOG = logging.getLogger(__name__)
 
 
-@six.add_metaclass(abc.ABCMeta)
-class TempestPlugin(object):
+class TempestPlugin(object, metaclass=abc.ABCMeta):
     """Provide basic hooks for an external plugin
 
     To provide tempest the necessary information to run the plugin.
@@ -194,11 +192,14 @@
     def get_plugin_load_tests_tuple(self):
         load_tests_dict = {}
         for plug in self.ext_plugins:
+            LOG.info('Loading tests from Tempest plugin: %s', plug.name)
             load_tests_dict[plug.name] = plug.obj.load_tests()
         return load_tests_dict
 
     def register_plugin_opts(self, conf):
         for plug in self.ext_plugins:
+            LOG.info('Register additional config options from Tempest '
+                     'plugin: %s', plug.name)
             try:
                 plug.obj.register_opts(conf)
             except Exception:
@@ -209,6 +210,9 @@
         plugin_options = []
         for plug in self.ext_plugins:
             opt_list = plug.obj.get_opt_lists()
+            LOG.info('List additional config options registered by '
+                     'Tempest plugin: %s', plug.name)
+
             if opt_list:
                 plugin_options.extend(opt_list)
         return plugin_options
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 143c6e1..5816ab1 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -30,8 +30,8 @@
     base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
     base_path = os.path.split(base_path)[0]
     # Load local tempest tests
-    for test_dir in ['tempest/api', 'tempest/scenario']:
-        full_test_dir = os.path.join(base_path, test_dir)
+    for test_dir in ['api', 'scenario']:
+        full_test_dir = os.path.join(base_path, 'tempest', test_dir)
         if not pattern:
             suite.addTests(loader.discover(full_test_dir,
                                            top_level_dir=base_path))
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index 1593464..8a1873b 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -12,10 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
 
 from oslo_utils import uuidutils
-import six
 
 from tempest.api.compute import base as compute_base
 from tempest.common import waiters
@@ -128,9 +127,9 @@
             mock.sentinel.server_id, wait_until='active')
         # make our assertions
         if fault:
-            self.assertIn(fault, six.text_type(ex))
+            self.assertIn(fault, str(ex))
         else:
-            self.assertNotIn(fault, six.text_type(ex))
+            self.assertNotIn(fault, str(ex))
         if compute_base.BaseV2ComputeTest.is_requested_microversion_compatible(
             '2.35'):
             status = 'ACTIVE'
diff --git a/tempest/tests/base.py b/tempest/tests/base.py
index 0b53b45..e8b2c98 100644
--- a/tempest/tests/base.py
+++ b/tempest/tests/base.py
@@ -12,7 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
+
 from oslotest import base
 
 
diff --git a/tempest/tests/cmd/sample_streams/calls.subunit b/tempest/tests/cmd/subunit_describe_calls_data/calls.subunit
similarity index 100%
rename from tempest/tests/cmd/sample_streams/calls.subunit
rename to tempest/tests/cmd/subunit_describe_calls_data/calls.subunit
Binary files differ
diff --git a/tempest/tests/cmd/subunit_describe_calls_data/calls_subunit_expected.json b/tempest/tests/cmd/subunit_describe_calls_data/calls_subunit_expected.json
new file mode 100644
index 0000000..53976ee
--- /dev/null
+++ b/tempest/tests/cmd/subunit_describe_calls_data/calls_subunit_expected.json
@@ -0,0 +1,87 @@
+{"bar":[
+      {
+         "name":"AgentsAdminTestJSON:setUp",
+         "request_body":"{\"agent\": {\"url\": \"xxx://xxxx/xxx/xxx\", \"hypervisor\": \"common\", \"md5hash\": \"add6bb58e139be103324d04d82d8f545\", \"version\": \"7.0\", \"architecture\": \"tempest-x86_64-424013832\", \"os\": \"linux\"}}",
+         "request_headers":"{'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
+         "response_body":"{\"agent\": {\"url\": \"xxx://xxxx/xxx/xxx\", \"hypervisor\": \"common\", \"md5hash\": \"add6bb58e139be103324d04d82d8f545\", \"version\": \"7.0\", \"architecture\": \"tempest-x86_64-424013832\", \"os\": \"linux\", \"agent_id\": 1}}",
+         "response_headers":"{'status': '200', 'content-length': '203', 'x-compute-request-id': 'req-25ddaae2-0ef1-40d1-8228-59bd64a7e75b', 'vary': 'X-OpenStack-Nova-API-Version', 'connection': 'close', 'x-openstack-nova-api-version': '2.1', 'date': 'Tue, 02 Feb 2016 03:27:00 GMT', 'content-type': 'application/json'}",
+         "service":"Nova",
+         "status_code":"200",
+         "url":"v2.1/<id>/os-agents",
+         "verb":"POST"
+},
+      {
+         "name":"AgentsAdminTestJSON:test_create_agent",
+         "request_body":"{\"agent\": {\"url\": \"xxx://xxxx/xxx/xxx\", \"hypervisor\": \"kvm\", \"md5hash\": \"add6bb58e139be103324d04d82d8f545\", \"version\": \"7.0\", \"architecture\": \"tempest-x86-252246646\", \"os\": \"win\"}}",
+         "request_headers":"{'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
+         "response_body":"{\"agent\": {\"url\": \"xxx://xxxx/xxx/xxx\", \"hypervisor\": \"kvm\", \"md5hash\": \"add6bb58e139be103324d04d82d8f545\", \"version\": \"7.0\", \"architecture\": \"tempest-x86-252246646\", \"os\": \"win\", \"agent_id\": 2}}",
+         "response_headers":"{'status': '200', 'content-length': '195', 'x-compute-request-id': 'req-b4136f06-c015-4e7e-995f-c43831e3ecce', 'vary': 'X-OpenStack-Nova-API-Version', 'connection': 'close', 'x-openstack-nova-api-version': '2.1', 'date': 'Tue, 02 Feb 2016 03:27:00 GMT', 'content-type': 'application/json'}",
+         "service":"Nova",
+         "status_code":"200",
+         "url":"v2.1/<id>/os-agents",
+         "verb":"POST"
+},
+      {
+         "name":"AgentsAdminTestJSON:tearDown",
+         "request_body":"None",
+         "request_headers":"{'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
+         "response_body":"",
+         "response_headers":"{'status': '200', 'content-length': '0', 'x-compute-request-id': 'req-ee905fd6-a5b5-4da4-8c37-5363cb25bd9d', 'vary': 'X-OpenStack-Nova-API-Version', 'connection': 'close', 'x-openstack-nova-api-version': '2.1', 'date': 'Tue, 02 Feb 2016 03:27:00 GMT', 'content-type': 'application/json'}",
+         "service":"Nova",
+         "status_code":"200",
+         "url":"v2.1/<id>/os-agents/1",
+         "verb":"DELETE"
+},
+      {
+         "name":"AgentsAdminTestJSON:_run_cleanups",
+         "request_body":"None",
+         "request_headers":"{'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
+         "response_headers":"{'status': '200', 'content-length': '0', 'x-compute-request-id': 'req-e912cac0-63e0-4679-a68a-b6d18ddca074', 'vary': 'X-OpenStack-Nova-API-Version', 'connection': 'close', 'x-openstack-nova-api-version': '2.1', 'date': 'Tue, 02 Feb 2016 03:27:00 GMT', 'content-type': 'application/json'}",
+         "service":"Nova",
+         "status_code":"200",
+         "url":"v2.1/<id>/os-agents/2",
+         "verb":"DELETE"
+}], "foo":[
+      {
+         "name":"AgentsAdminTestJSON:setUp",
+         "request_body":"{\"agent\": {\"url\": \"xxx://xxxx/xxx/xxx\", \"hypervisor\": \"common\", \"md5hash\": \"add6bb58e139be103324d04d82d8f545\", \"version\": \"7.0\", \"architecture\": \"tempest-x86_64-948635295\", \"os\": \"linux\"}}",
+         "request_headers":"{'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
+         "response_body":"{\"agent\": {\"url\": \"xxx://xxxx/xxx/xxx\", \"hypervisor\": \"common\", \"md5hash\": \"add6bb58e139be103324d04d82d8f545\", \"version\": \"7.0\", \"architecture\": \"tempest-x86_64-948635295\", \"os\": \"linux\", \"agent_id\": 3}}",
+         "response_headers":"{'status': '200', 'content-length': '203', 'x-compute-request-id': 'req-ccd2116d-04b1-4ffe-ae32-fb623f68bf1c', 'vary': 'X-OpenStack-Nova-API-Version', 'connection': 'close', 'x-openstack-nova-api-version': '2.1', 'date': 'Tue, 02 Feb 2016 03:27:01 GMT', 'content-type': 'application/json'}",
+         "service":"Nova",
+         "status_code":"200",
+         "url":"v2.1/<id>/os-agents",
+         "verb":"POST"
+},
+      {
+         "name":"AgentsAdminTestJSON:test_delete_agent",
+         "request_body":"None",
+         "request_headers":"{'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
+         "response_body":"",
+         "response_headers":"{'status': '200', 'content-length': '0', 'x-compute-request-id': 'req-6e7fa28f-ae61-4388-9a78-947c58bc0588', 'vary': 'X-OpenStack-Nova-API-Version', 'connection': 'close', 'x-openstack-nova-api-version': '2.1', 'date': 'Tue, 02 Feb 2016 03:27:01 GMT', 'content-type': 'application/json'}",
+         "service":"Nova",
+         "status_code":"200",
+         "url":"v2.1/<id>/os-agents/3",
+         "verb":"DELETE"
+},
+      {
+         "name":"AgentsAdminTestJSON:test_delete_agent",
+         "request_body":"None",
+         "request_headers":"{'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
+         "response_body":"{\"agents\": []}",
+         "response_headers":"{'status': '200', 'content-length': '14', 'content-location': 'http://23.253.76.97:8774/v2.1/cf6b1933fe5b476fbbabb876f6d1b924/os-agents', 'x-compute-request-id': 'req-e41aa9b4-41a6-4138-ae04-220b768eb644', 'vary': 'X-OpenStack-Nova-API-Version', 'connection': 'close', 'x-openstack-nova-api-version': '2.1', 'date': 'Tue, 02 Feb 2016 03:27:01 GMT', 'content-type': 'application/json'}",
+         "service":"Nova",
+         "status_code":"200",
+         "url":"v2.1/<id>/os-agents",
+         "verb":"GET"
+},
+      {
+         "name":"AgentsAdminTestJSON:tearDown",
+         "request_body":"None",
+         "request_headers":"{'Content-Type': 'application/json', 'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
+         "response_headers":"{'status': '404', 'content-length': '82', 'x-compute-request-id': 'req-e297aeea-91cf-4f26-b49c-8f46b1b7a926', 'vary': 'X-OpenStack-Nova-API-Version', 'connection': 'close', 'x-openstack-nova-api-version': '2.1', 'date': 'Tue, 02 Feb 2016 03:27:02 GMT', 'content-type': 'application/json; charset=UTF-8'}",
+         "service":"Nova",
+         "status_code":"404",
+         "url":"v2.1/<id>/os-agents/3",
+         "verb":"DELETE"
+}]}
\ No newline at end of file
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index a962e37..7d764be 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from unittest import mock
+
 import fixtures
-import mock
 from oslo_config import cfg
 
 from tempest.cmd import account_generator
@@ -336,3 +337,24 @@
     def setUp(self):
         self.mock_domains()
         super(TestDumpAccountsV3, self).setUp()
+
+
+class TestAccountGeneratorCliCheck(base.TestCase):
+
+    def setUp(self):
+        super(TestAccountGeneratorCliCheck, self).setUp()
+        self.account_generator = account_generator.TempestAccountGenerator(
+            app=mock.Mock(), app_args=mock.Mock())
+        self.parser = self.account_generator.get_parser("generator")
+
+    def test_account_generator_zero_concurrency(self):
+        error = self.assertRaises(
+            SystemExit, lambda: self.parser.parse_args(
+                ['-r', '0', 'accounts_file.yaml']))
+        self.assertTrue(error.code != 0)
+
+    def test_account_generator_negative_concurrency(self):
+        error = self.assertRaises(
+            SystemExit, lambda: self.parser.parse_args(
+                ['-r', '-1', 'accounts_file.yaml']))
+        self.assertTrue(error.code != 0)
diff --git a/tempest/tests/cmd/test_cleanup.py b/tempest/tests/cmd/test_cleanup.py
index 1618df9..69e735b 100644
--- a/tempest/tests/cmd/test_cleanup.py
+++ b/tempest/tests/cmd/test_cleanup.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import mock
+from unittest import mock
 
 from tempest.cmd import cleanup
 from tempest.tests import base
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 8366290..2301be6 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -143,40 +143,40 @@
 
     saved_state = {
         # Static list to ensure global service saved items are not deleted
-        "users": {u'32rwef64245tgr20121qw324bgg': u'Lightning'},
-        "flavors": {u'42': u'm1.tiny'},
-        "images": {u'34yhwr-4t3q': u'stratus-0.3.2-x86_64-disk'},
-        "roles": {u'3efrt74r45hn': u'president'},
-        "projects": {u'f38ohgp93jj032': u'manhattan'},
-        "domains": {u'default': u'Default'},
+        "users": {'32rwef64245tgr20121qw324bgg': 'Lightning'},
+        "flavors": {'42': 'm1.tiny'},
+        "images": {'34yhwr-4t3q': 'stratus-0.3.2-x86_64-disk'},
+        "roles": {'3efrt74r45hn': 'president'},
+        "projects": {'f38ohgp93jj032': 'manhattan'},
+        "domains": {'default': 'Default'},
         # Static list to ensure project service saved items are not deleted
-        "snapshots": {u'1ad4c789-7e8w-4dwg-afc5': u'saved-snapshot'},
-        "servers": {u'7a6d4v7w-36ds-4216': u'saved-server'},
-        "server_groups": {u'as6d5f7g-46ca-475e': u'saved-server-group'},
-        "keypairs": {u'saved-key-pair': {
-            u'fingerprint': u'7e:eb:ab:24',
-            u'name': u'saved-key-pair'
+        "snapshots": {'1ad4c789-7e8w-4dwg-afc5': 'saved-snapshot'},
+        "servers": {'7a6d4v7w-36ds-4216': 'saved-server'},
+        "server_groups": {'as6d5f7g-46ca-475e': 'saved-server-group'},
+        "keypairs": {'saved-key-pair': {
+            'fingerprint': '7e:eb:ab:24',
+            'name': 'saved-key-pair'
         }},
-        "volumes": {u'aa77asdf-1234': u'saved-volume'},
-        "networks": {u'6722fc13-4319': {
-            u'id': u'6722fc13-4319',
-            u'name': u'saved-network'
+        "volumes": {'aa77asdf-1234': 'saved-volume'},
+        "networks": {'6722fc13-4319': {
+            'id': '6722fc13-4319',
+            'name': 'saved-network'
         }},
-        "floatingips": {u'9e82d248-408a': {
-            u'id': u'9e82d248-408a',
-            u'status': u'ACTIVE'
+        "floatingips": {'9e82d248-408a': {
+            'id': '9e82d248-408a',
+            'status': 'ACTIVE'
         }},
-        "routers": {u'4s5w34hj-id44': u'saved-router'},
-        "metering_label_rules": {u'93a973ce-4dc5': {
-            u'direction': u'ingress',
-            u'id': u'93a973ce-4dc5'
+        "routers": {'4s5w34hj-id44': 'saved-router'},
+        "metering_label_rules": {'93a973ce-4dc5': {
+            'direction': 'ingress',
+            'id': '93a973ce-4dc5'
         }},
-        "metering_labels": {u'723b346ce866-4c7q': u'saved-label'},
-        "ports": {u'aa74aa4v-741a': u'saved-port'},
-        "security_groups": {u'7q844add-3697': u'saved-sec-group'},
-        "subnets": {u'55ttda4a-2584': u'saved-subnet'},
-        "subnetpools": {u'8acf64c1-43fc': u'saved-subnet-pool'},
-        "regions": {u'RegionOne': {}}
+        "metering_labels": {'723b346ce866-4c7q': 'saved-label'},
+        "ports": {'aa74aa4v-741a': 'saved-port'},
+        "security_groups": {'7q844add-3697': 'saved-sec-group'},
+        "subnets": {'55ttda4a-2584': 'saved-subnet'},
+        "subnetpools": {'8acf64c1-43fc': 'saved-subnet-pool'},
+        "regions": {'RegionOne': {}}
     }
     # Mocked methods
     get_method = 'tempest.lib.common.rest_client.RestClient.get'
@@ -274,19 +274,22 @@
                     "name": "test"
                 },
                 "name": "test-volume-snapshot",
-                "user_id": "40c2102f4a554b848d96b14f3eec39ed",
                 "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506",
                 "created_at": "2015-11-29T02:25:51.000000",
                 "size": 1,
                 "updated_at": "2015-11-20T05:36:40.000000",
-                "os-extended-snapshot-attributes:progress": "100%",
                 "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c",
                 "description": "volume snapshot"
             },
             {
                 "status": "available",
                 "name": "saved-snapshot",
+                "metadata": {},
                 "id": "1ad4c789-7e8w-4dwg-afc5",
+                "size": 1,
+                "volume_id": "af7c41be-1ff6-4233-a690-7ed61c34347f",
+                "created_at": "2015-11-20T05:39:40.000000",
+                "updated_at": "2015-11-20T05:39:40.000000",
                 "description": "snapshot in saved state"
             }
         ]
@@ -508,7 +511,8 @@
             },
             {
                 "id": "aa77asdf-1234",
-                "name": "saved-volume"
+                "name": "saved-volume",
+                "links": [],
             }
         ]
     }
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 8997a4c..3b5e901 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -18,10 +18,9 @@
 import shutil
 import subprocess
 import tempfile
+from unittest import mock
 
 import fixtures
-import mock
-import six
 
 from tempest.cmd import run
 from tempest.cmd import workspace
@@ -29,10 +28,6 @@
 from tempest.lib.common.utils import data_utils
 from tempest.tests import base
 
-if six.PY2:
-    # Python 2 has not FileNotFoundError exception
-    FileNotFoundError = IOError
-
 DEVNULL = open(os.devnull, 'wb')
 atexit.register(DEVNULL.close)
 
@@ -72,6 +67,11 @@
 
 
 class TestRunReturnCode(base.TestCase):
+
+    exclude_regex = '--exclude-regex'
+    exclude_list = '--exclude-list'
+    include_list = '--include-list'
+
     def setUp(self):
         super(TestRunReturnCode, self).setUp()
         # Setup test dirs
@@ -96,6 +96,14 @@
         self.addCleanup(os.chdir, os.path.abspath(os.curdir))
         os.chdir(self.directory)
 
+    def _get_test_list_file(self, content):
+        fd, path = tempfile.mkstemp()
+        self.addCleanup(os.remove, path)
+        test_file = os.fdopen(fd, 'wb', 0)
+        self.addCleanup(test_file.close)
+        test_file.write(content.encode('utf-8'))
+        return path
+
     def assertRunExit(self, cmd, expected):
         p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
@@ -119,19 +127,23 @@
         subprocess.call(['stestr', 'init'])
         self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
 
-    def test_tempest_run_blackregex_failing(self):
-        self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+    def test_tempest_run_exclude_regex_failing(self):
+        self.assertRunExit(['tempest', 'run',
+                            self.exclude_regex, 'failing'], 0)
 
-    def test_tempest_run_blackregex_failing_with_stestr_repository(self):
+    def test_tempest_run_exclude_regex_failing_with_stestr_repository(self):
         subprocess.call(['stestr', 'init'])
-        self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+        self.assertRunExit(['tempest', 'run',
+                            self.exclude_regex, 'failing'], 0)
 
-    def test_tempest_run_blackregex_passing(self):
-        self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+    def test_tempest_run_exclude_regex_passing(self):
+        self.assertRunExit(['tempest', 'run',
+                            self.exclude_regex, 'passing'], 1)
 
-    def test_tempest_run_blackregex_passing_with_stestr_repository(self):
+    def test_tempest_run_exclude_regex_passing_with_stestr_repository(self):
         subprocess.call(['stestr', 'init'])
-        self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+        self.assertRunExit(['tempest', 'run',
+                            self.exclude_regex, 'passing'], 1)
 
     def test_tempest_run_fails(self):
         self.assertRunExit(['tempest', 'run'], 1)
@@ -140,52 +152,44 @@
         subprocess.call(['stestr', 'init'])
         out, err = self.assertRunExit(['tempest', 'run', '-l'], 0)
         tests = out.split()
-        tests = sorted([six.text_type(x.rstrip()) for x in tests if x])
+        tests = sorted([str(x.rstrip()) for x in tests if x])
         result = [
-            six.text_type('tests.test_failing.FakeTestClass.test_pass'),
-            six.text_type('tests.test_failing.FakeTestClass.test_pass_list'),
-            six.text_type('tests.test_passing.FakeTestClass.test_pass'),
-            six.text_type('tests.test_passing.FakeTestClass.test_pass_list'),
+            str('tests.test_failing.FakeTestClass.test_pass'),
+            str('tests.test_failing.FakeTestClass.test_pass_list'),
+            str('tests.test_passing.FakeTestClass.test_pass'),
+            str('tests.test_passing.FakeTestClass.test_pass_list'),
         ]
         # NOTE(mtreinish): on python 3 the subprocess prints b'' around
         # stdout.
-        if six.PY3:
-            result = ["b\'" + x + "\'" for x in result]
+        result = ["b\'" + x + "\'" for x in result]
         self.assertEqual(result, tests)
 
-    def test_tempest_run_with_whitelist(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        whitelist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(whitelist_file.close)
-        whitelist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path], 0)
+    def test_tempest_run_with_worker_file(self):
+        path = self._get_test_list_file(
+            '- worker:\n  - passing\n  concurrency: 3')
+        self.assertRunExit(['tempest', 'run', '--worker-file=%s' % path], 0)
 
-    def test_tempest_run_with_whitelist_regex_include_pass_check_fail(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        whitelist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(whitelist_file.close)
-        whitelist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+    def test_tempest_run_with_include_list(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.include_list, path)], 0)
+
+    def test_tempest_run_with_include_regex_include_pass_check_fail(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.include_list, path),
                             '--regex', 'fail'], 1)
 
-    def test_tempest_run_with_whitelist_regex_include_pass_check_pass(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        whitelist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(whitelist_file.close)
-        whitelist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+    def test_tempest_run_with_include_regex_include_pass_check_pass(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.include_list, path),
                             '--regex', 'passing'], 0)
 
-    def test_tempest_run_with_whitelist_regex_include_fail_check_pass(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        whitelist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(whitelist_file.close)
-        whitelist_file.write('failing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+    def test_tempest_run_with_include_regex_include_fail_check_pass(self):
+        path = self._get_test_list_file('failing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.include_list, path),
                             '--regex', 'pass'], 1)
 
     def test_tempest_run_passes_with_config_file(self):
@@ -193,50 +197,75 @@
                             '--config-file', self.stestr_conf_file,
                             '--regex', 'passing'], 0)
 
-    def test_tempest_run_with_blacklist_failing(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('failing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 0)
+    def test_tempest_run_with_exclude_list_failing(self):
+        path = self._get_test_list_file('failing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path)], 0)
 
-    def test_tempest_run_with_blacklist_passing(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 1)
+    def test_tempest_run_with_exclude_list_passing(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path)], 1)
 
-    def test_tempest_run_with_blacklist_regex_exclude_fail_check_pass(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('failing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+    def test_tempest_run_with_exclude_list_regex_exclude_fail_check_pass(self):
+        path = self._get_test_list_file('failing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path),
                             '--regex', 'pass'], 0)
 
-    def test_tempest_run_with_blacklist_regex_exclude_pass_check_pass(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+    def test_tempest_run_with_exclude_list_regex_exclude_pass_check_pass(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path),
                             '--regex', 'pass'], 1)
 
-    def test_tempest_run_with_blacklist_regex_exclude_pass_check_fail(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+    def test_tempest_run_with_exclude_list_regex_exclude_pass_check_fail(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path),
                             '--regex', 'fail'], 1)
 
 
+class TestOldArgRunReturnCode(TestRunReturnCode):
+    """A class for testing deprecated but still supported args.
+
+    This class will be removed once we remove the following arguments:
+      * --black-regex
+      * --blacklist-file
+      * --whitelist-file
+    """
+    exclude_regex = '--black-regex'
+    exclude_list = '--blacklist-file'
+    include_list = '--whitelist-file'
+
+    def _test_args_passing(self, args):
+        self.assertRunExit(['tempest', 'run'] + args, 0)
+
+    def test_tempest_run_new_old_arg_comb(self):
+        path = self._get_test_list_file('failing')
+        self._test_args_passing(['--black-regex', 'failing',
+                                 '--exclude-regex', 'failing'])
+        self._test_args_passing(['--blacklist-file=' + path,
+                                 '--exclude-list=' + path])
+        path = self._get_test_list_file('passing')
+        self._test_args_passing(['--whitelist-file=' + path,
+                                 '--include-list=' + path])
+
+    def _test_args_passing_with_stestr_repository(self, args):
+        subprocess.call(['stestr', 'init'])
+        self.assertRunExit(['tempest', 'run'] + args, 0)
+
+    def test_tempest_run_new_old_arg_comb_with_stestr_repository(self):
+        path = self._get_test_list_file('failing')
+        self._test_args_passing_with_stestr_repository(
+            ['--black-regex', 'failing', '--exclude-regex', 'failing'])
+        self._test_args_passing_with_stestr_repository(
+            ['--blacklist-file=' + path, '--exclude-list=' + path])
+        path = self._get_test_list_file('passing')
+        self._test_args_passing_with_stestr_repository(
+            ['--whitelist-file=' + path, '--include-list=' + path])
+
+
 class TestConfigPathCheck(base.TestCase):
     def setUp(self):
         super(TestConfigPathCheck, self).setUp()
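A short usage sketch mirroring the _get_test_list_file helper above, for the renamed selection options (--include-list, --exclude-list, --exclude-regex); the file contents, pattern and function name are illustrative.

    import subprocess
    import tempfile


    def run_with_include_list(pattern='passing'):
        # Write one regex per line, as the tests above do.
        with tempfile.NamedTemporaryFile('w', suffix='.txt',
                                         delete=False) as handle:
            handle.write(pattern + '\n')
            list_file = handle.name
        # --include-list replaces the deprecated --whitelist-file; the old
        # spelling is still accepted, as TestOldArgRunReturnCode verifies.
        return subprocess.call(
            ['tempest', 'run', '--include-list=%s' % list_file])
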
diff --git a/tempest/tests/cmd/test_subunit_describe_calls.py b/tempest/tests/cmd/test_subunit_describe_calls.py
index cb34ba6..4fed84a 100644
--- a/tempest/tests/cmd/test_subunit_describe_calls.py
+++ b/tempest/tests/cmd/test_subunit_describe_calls.py
@@ -14,220 +14,422 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import argparse
+from io import StringIO
 import os
+import shutil
 import subprocess
+import sys
 import tempfile
+from unittest import mock
+from unittest.mock import patch
 
+
+from oslo_serialization import jsonutils as json
 from tempest.cmd import subunit_describe_calls
 from tempest.tests import base
 
 
-class TestSubunitDescribeCalls(base.TestCase):
-    def test_return_code(self):
-        subunit_file = os.path.join(
-            os.path.dirname(os.path.abspath(__file__)),
-            'sample_streams/calls.subunit')
-        p = subprocess.Popen([
-            'subunit-describe-calls', '-s', subunit_file,
-            '-o', tempfile.mkstemp()[1]], stdin=subprocess.PIPE)
-        p.communicate()
-        self.assertEqual(0, p.returncode)
+class TestArgumentParser(base.TestCase):
+    def test_init(self):
+        test_object = subunit_describe_calls.ArgumentParser()
+        self.assertEqual("subunit-describe-calls", test_object.prog)
+        self.assertEqual(subunit_describe_calls.DESCRIPTION,
+                         test_object.description)
 
-    def test_verbose(self):
-        subunit_file = os.path.join(
-            os.path.dirname(os.path.abspath(__file__)),
-            'sample_streams/calls.subunit')
-        p = subprocess.Popen([
-            'subunit-describe-calls', '-s', subunit_file,
-            '-v'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
-        stdout = p.communicate()
-        self.assertEqual(0, p.returncode)
-        self.assertIn(b'- request headers:', stdout[0])
-        self.assertIn(b'- request body:', stdout[0])
-        self.assertIn(b'- response headers:', stdout[0])
-        self.assertIn(b'- response body:', stdout[0])
 
-    def test_return_code_no_output(self):
-        subunit_file = os.path.join(
+class TestUrlParser(base.TestCase):
+    services_custom_ports = {
+        "18776": "Block Storage",
+        "18774": "Nova",
+        "18773": "Nova-API",
+        "18386": "Sahara",
+        "35358": "Keystone",
+        "19292": "Glance",
+        "19696": "Neutron",
+        "16000": "Swift",
+        "18004": "Heat",
+        "18777": "Ceilometer",
+        "10080": "Horizon",
+        "18080": "Swift",
+        "1873": "rsync",
+        "13260": "iSCSI",
+        "13306": "MySQL",
+        "15672": "AMQP",
+        "18082": "murano"}
+
+    def setUp(self):
+        super(TestUrlParser, self).setUp()
+        self.test_object = subunit_describe_calls.UrlParser()
+
+    def test_get_service_default_ports(self):
+        base_url = "http://site.something.com:"
+        for port in self.test_object.services:
+            url = base_url + port + "/v2/action"
+            service = self.test_object.services[port]
+            self.assertEqual(service, self.test_object.get_service(url))
+
+    def test_get_service_custom_ports(self):
+        self.test_object = subunit_describe_calls.\
+            UrlParser(services=self.services_custom_ports)
+        base_url = "http://site.something.com:"
+        for port in self.services_custom_ports:
+            url = base_url + port + "/v2/action"
+            service = self.services_custom_ports[port]
+            self.assertEqual(service, self.test_object.get_service(url))
+
+    def test_get_service_port_not_found(self):
+        url = "https://site.somewhere.com:1234/v2/action"
+        self.assertEqual("Unknown", self.test_object.get_service(url))
+        self.assertEqual("Unknown", self.test_object.get_service(""))
+
+    def test_parse_details_none(self):
+        self.assertIsNone(self.test_object.parse_details(None))
+
+    def test_url_path_ports(self):
+        uuid_sample1 = "3715e0bb-b1b3-4291-aa13-2c86c3b9ec93"
+        uuid_sample2 = "2715e0bb-b1b4-4291-aa13-2c86c3b9ec88"
+
+        # test http url
+        host = "http://host.company.com"
+        url = host + ":8776/v3/" + uuid_sample1 + "/types/" + \
+            uuid_sample2 + "/extra_specs"
+        self.assertEqual("v3/<uuid>/types/<uuid>/extra_specs",
+                         self.test_object.url_path(url))
+        url = host + ":8774/v2.1/servers/" + uuid_sample1
+        self.assertEqual("v2.1/servers/<uuid>",
+                         self.test_object.url_path(url))
+        # test https url
+        host = "https://host.company.com"
+        url = host + ":8776/v3/" + uuid_sample1 + "/types/" + \
+            uuid_sample2 + "/extra_specs"
+        self.assertEqual("v3/<uuid>/types/<uuid>/extra_specs",
+                         self.test_object.url_path(url))
+        url = host + ":8774/v2.1/servers/" + uuid_sample1
+        self.assertEqual("v2.1/servers/<uuid>",
+                         self.test_object.url_path(url))
+
+    def test_url_path_no_match(self):
+        host_port = 'https://host.company.com:1234/'
+        url = 'v2/action/no/special/data'
+        self.assertEqual(url, self.test_object.url_path(host_port + url))
+        url = 'data'
+        self.assertEqual(url, self.test_object.url_path(url))
+
+
+class TestCliBase(base.TestCase):
+    """Base class for share code on all CLI sub-process testing"""
+
+    def setUp(self):
+        super(TestCliBase, self).setUp()
+        self._subunit_file = os.path.join(
             os.path.dirname(os.path.abspath(__file__)),
-            'sample_streams/calls.subunit')
+            'subunit_describe_calls_data', 'calls.subunit')
+
+    def _bytes_to_string(self, data):
+        if isinstance(data, (bytes, bytearray)):
+            data = str(data, 'utf-8')
+        return data
+
+    def _assert_cli_message(self, data):
+        data = self._bytes_to_string(data)
+        self.assertIn("Running subunit_describe_calls ...", data)
+
+    def _assert_deprecated_warning(self, stdout):
+        self.assertIn(
+            b"Use of: 'subunit-describe-calls' is deprecated, "
+            b"please use: 'tempest subunit-describe-calls'", stdout)
+
+    def _assert_expect_json(self, json_data):
+        expected_file_name = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            'subunit_describe_calls_data', 'calls_subunit_expected.json')
+        with open(expected_file_name, "rb") as read_file:
+            expected_result = json.load(read_file)
+        self.assertDictEqual(expected_result, json_data)
+
+    def _assert_headers_and_bodies(self, data):
+        data = self._bytes_to_string(data)
+        self.assertIn('- request headers:', data)
+        self.assertIn('- request body:', data)
+        self.assertIn('- response headers:', data)
+        self.assertIn('- response body:', data)
+
+    def _assert_methods_details(self, data):
+        data = self._bytes_to_string(data)
+        self.assertIn('foo', data)
+        self.assertIn('- 200 POST request for Nova to v2.1/<id>/',
+                      data)
+        self.assertIn('- 200 DELETE request for Nova to v2.1/<id>/',
+                      data)
+        self.assertIn('- 200 GET request for Nova to v2.1/<id>/',
+                      data)
+        self.assertIn('- 404 DELETE request for Nova to v2.1/<id>/',
+                      data)
+
+    def _assert_mutual_exclusive_message(self, stderr):
+        self.assertIn(b"usage: subunit-describe-calls "
+                      b"[-h] [-s [<subunit file>]]", stderr)
+        self.assertIn(b"[-n <non subunit name>] [-o <output file>]",
+                      stderr)
+        self.assertIn(b"[-p <ports file>] [-v | -a]", stderr)
+        self.assertIn(
+            b"subunit-describe-calls: error: argument -v/--verbose: "
+            b"not allowed with argument -a/--all-stdout", stderr)
+
+    def _assert_no_headers_and_bodies(self, data):
+        data = self._bytes_to_string(data)
+        self.assertNotIn('- request headers:', data)
+        self.assertNotIn('- request body:', data)
+        self.assertNotIn('- response headers:', data)
+        self.assertNotIn('- response body:', data)
+
+
+class TestMainCli(TestCliBase):
+    """Test cases that use subunit_describe_calls module main interface
+
+    via subprocess calls to make sure the total user experience
+    is well defined and tested. This interface is deprecated.
+    Note: these test do not affect code coverage percentages.
+    """
+
+    def test_main_output_file(self):
+        temp_file = tempfile.mkstemp()[1]
         p = subprocess.Popen([
-            'subunit-describe-calls', '-s', subunit_file],
-            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
-        stdout = p.communicate()
+            'subunit-describe-calls', '-s', self._subunit_file,
+            '-o', temp_file], stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
         self.assertEqual(0, p.returncode)
-        self.assertIn(b'foo', stdout[0])
-        self.assertIn(b'- 200 POST request for Nova to v2.1/<id>/',
-                      stdout[0])
-        self.assertIn(b'- 200 DELETE request for Nova to v2.1/<id>/',
-                      stdout[0])
-        self.assertIn(b'- 200 GET request for Nova to v2.1/<id>/',
-                      stdout[0])
-        self.assertIn(b'- 404 DELETE request for Nova to v2.1/<id>/',
-                      stdout[0])
-        self.assertNotIn(b'- request headers:', stdout[0])
-        self.assertNotIn(b'- request body:', stdout[0])
-        self.assertNotIn(b'- response headers:', stdout[0])
-        self.assertNotIn(b'- response body:', stdout[0])
+        self._assert_cli_message(stdout)
+        self._assert_deprecated_warning(stdout)
+        with open(temp_file, 'r') as file:
+            data = json.loads(file.read())
+        self._assert_expect_json(data)
+
+    def test_main_verbose(self):
+        p = subprocess.Popen([
+            'subunit-describe-calls', '-s', self._subunit_file,
+            '-v'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        self.assertEqual(0, p.returncode)
+        self._assert_cli_message(stdout)
+        self._assert_deprecated_warning(stdout)
+        self._assert_methods_details(stdout)
+        self._assert_headers_and_bodies(stdout)
+
+    def test_main_all_stdout(self):
+        p = subprocess.Popen([
+            'subunit-describe-calls', '-s', self._subunit_file,
+            '--all-stdout'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        self.assertEqual(0, p.returncode)
+        self._assert_cli_message(stdout)
+        self._assert_deprecated_warning(stdout)
+        self._assert_methods_details(stdout)
+        self._assert_headers_and_bodies(stdout)
+
+    def test_main(self):
+        p = subprocess.Popen([
+            'subunit-describe-calls', '-s', self._subunit_file],
+            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        self.assertEqual(0, p.returncode)
+        self._assert_cli_message(stdout)
+        self._assert_deprecated_warning(stdout)
+        self._assert_methods_details(stdout)
+        self._assert_no_headers_and_bodies(stdout)
+
+    def test_main_verbose_and_all_stdout(self):
+        p = subprocess.Popen([
+            'subunit-describe-calls', '-s', self._subunit_file,
+            '-a', '-v'],
+            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        self.assertEqual(2, p.returncode)
+        self._assert_cli_message(stdout)
+        self._assert_deprecated_warning(stdout)
+        self._assert_mutual_exclusive_message(stderr)
+
+
+class TestCli(TestCliBase):
+    """Test cases that use tempest subunit_describe_calls cliff interface
+
+    via subprocess calls to make sure the total user experience
+    is well defined and tested.
+    Note: these test do not affect code coverage percentages.
+    """
+
+    def _assert_cliff_verbose(self, stdout):
+        self.assertIn(b'tempest initialize_app', stdout)
+        self.assertIn(b'prepare_to_run_command TempestSubunitDescribeCalls',
+                      stdout)
+        self.assertIn(b'tempest clean_up TempestSubunitDescribeCalls',
+                      stdout)
+
+    def test_run_all_stdout(self):
+        p = subprocess.Popen(['tempest', 'subunit-describe-calls',
+                              '-s', self._subunit_file, '-a'],
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        self.assertEqual(0, p.returncode)
+        self._assert_cli_message(stdout)
+        self._assert_methods_details(stdout)
+        self._assert_headers_and_bodies(stdout)
+
+    def test_run_verbose(self):
+        p = subprocess.Popen(['tempest', 'subunit-describe-calls',
+                              '-s', self._subunit_file, '-v'],
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        self.assertEqual(0, p.returncode)
+        self._assert_cli_message(stdout)
+        self._assert_methods_details(stdout)
+        self._assert_no_headers_and_bodies(stdout)
+        self._assert_cliff_verbose(stderr)
+
+    def test_run_min(self):
+        p = subprocess.Popen(['tempest', 'subunit-describe-calls',
+                              '-s', self._subunit_file],
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        self.assertEqual(0, p.returncode)
+        self._assert_cli_message(stdout)
+        self._assert_methods_details(stdout)
+        self._assert_no_headers_and_bodies(stdout)
+
+    def test_run_verbose_all_stdout(self):
+        """Test Cliff -v argument
+
+        Since the Cliff framework defines a verbosity argument at the
+        abstract command level, the -v or --verbose flag for
+        this command is not processed as a boolean.
+        So verbose-only behaviour exists only for the
+        deprecated main CLI interface. Once that interface
+        is deleted, this test will no longer be needed.
+        """
+        p = subprocess.Popen(['tempest', 'subunit-describe-calls',
+                              '-s', self._subunit_file, '-a', '-v'],
+                             stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        self.assertEqual(0, p.returncode)
+        self._assert_cli_message(stdout)
+        self._assert_cliff_verbose(stderr)
+        self._assert_methods_details(stdout)
+
+
+class TestSubunitDescribeCalls(TestCliBase):
+    """Test cases use the subunit_describe_calls module interface
+
+    and effect code coverage reporting
+    """
+
+    def setUp(self):
+        super(TestSubunitDescribeCalls, self).setUp()
+        self.test_object = subunit_describe_calls.TempestSubunitDescribeCalls(
+            app=mock.Mock(),
+            app_args=mock.Mock(spec=argparse.Namespace))
 
     def test_parse(self):
-        subunit_file = os.path.join(
-            os.path.dirname(os.path.abspath(__file__)),
-            'sample_streams/calls.subunit')
-        parser = subunit_describe_calls.parse(
-            open(subunit_file), "pythonlogging", None)
-        expected_result = {
-            'bar': [{
-                'name': 'AgentsAdminTestJSON:setUp',
-                'request_body': '{"agent": {"url": "xxx://xxxx/xxx/xxx", '
-                '"hypervisor": "common", "md5hash": '
-                '"add6bb58e139be103324d04d82d8f545", "version": "7.0", '
-                '"architecture": "tempest-x86_64-424013832", "os": "linux"}}',
-                'request_headers': "{'Content-Type': 'application/json', "
-                "'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
-                'response_body': '{"agent": {"url": "xxx://xxxx/xxx/xxx", '
-                '"hypervisor": "common", "md5hash": '
-                '"add6bb58e139be103324d04d82d8f545", "version": "7.0", '
-                '"architecture": "tempest-x86_64-424013832", "os": "linux", '
-                '"agent_id": 1}}',
-                'response_headers': "{'status': '200', 'content-length': "
-                "'203', 'x-compute-request-id': "
-                "'req-25ddaae2-0ef1-40d1-8228-59bd64a7e75b', 'vary': "
-                "'X-OpenStack-Nova-API-Version', 'connection': 'close', "
-                "'x-openstack-nova-api-version': '2.1', 'date': "
-                "'Tue, 02 Feb 2016 03:27:00 GMT', 'content-type': "
-                "'application/json'}",
-                'service': 'Nova',
-                'status_code': '200',
-                'url': 'v2.1/<id>/os-agents',
-                'verb': 'POST'}, {
-                'name': 'AgentsAdminTestJSON:test_create_agent',
-                'request_body': '{"agent": {"url": "xxx://xxxx/xxx/xxx", '
-                '"hypervisor": "kvm", "md5hash": '
-                '"add6bb58e139be103324d04d82d8f545", "version": "7.0", '
-                '"architecture": "tempest-x86-252246646", "os": "win"}}',
-                'request_headers': "{'Content-Type': 'application/json', "
-                "'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
-                'response_body': '{"agent": {"url": "xxx://xxxx/xxx/xxx", '
-                '"hypervisor": "kvm", "md5hash": '
-                '"add6bb58e139be103324d04d82d8f545", "version": "7.0", '
-                '"architecture": "tempest-x86-252246646", "os": "win", '
-                '"agent_id": 2}}',
-                'response_headers': "{'status': '200', 'content-length': "
-                "'195', 'x-compute-request-id': "
-                "'req-b4136f06-c015-4e7e-995f-c43831e3ecce', 'vary': "
-                "'X-OpenStack-Nova-API-Version', 'connection': 'close', "
-                "'x-openstack-nova-api-version': '2.1', 'date': "
-                "'Tue, 02 Feb 2016 03:27:00 GMT', 'content-type': "
-                "'application/json'}",
-                'service': 'Nova',
-                'status_code': '200',
-                'url': 'v2.1/<id>/os-agents',
-                'verb': 'POST'}, {
-                'name': 'AgentsAdminTestJSON:tearDown',
-                'request_body': 'None',
-                'request_headers': "{'Content-Type': 'application/json', "
-                "'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
-                'response_body': '',
-                'response_headers': "{'status': '200', 'content-length': "
-                "'0', 'x-compute-request-id': "
-                "'req-ee905fd6-a5b5-4da4-8c37-5363cb25bd9d', 'vary': "
-                "'X-OpenStack-Nova-API-Version', 'connection': 'close', "
-                "'x-openstack-nova-api-version': '2.1', 'date': "
-                "'Tue, 02 Feb 2016 03:27:00 GMT', 'content-type': "
-                "'application/json'}",
-                'service': 'Nova',
-                'status_code': '200',
-                'url': 'v2.1/<id>/os-agents/1',
-                'verb': 'DELETE'}, {
-                'name': 'AgentsAdminTestJSON:_run_cleanups',
-                'request_body': 'None',
-                'request_headers': "{'Content-Type': 'application/json', "
-                "'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
-                'response_headers': "{'status': '200', 'content-length': "
-                "'0', 'x-compute-request-id': "
-                "'req-e912cac0-63e0-4679-a68a-b6d18ddca074', 'vary': "
-                "'X-OpenStack-Nova-API-Version', 'connection': 'close', "
-                "'x-openstack-nova-api-version': '2.1', 'date': "
-                "'Tue, 02 Feb 2016 03:27:00 GMT', 'content-type': "
-                "'application/json'}",
-                'service': 'Nova',
-                'status_code': '200',
-                'url': 'v2.1/<id>/os-agents/2',
-                'verb': 'DELETE'}],
-            'foo': [{
-                'name': 'AgentsAdminTestJSON:setUp',
-                'request_body': '{"agent": {"url": "xxx://xxxx/xxx/xxx", '
-                '"hypervisor": "common", "md5hash": '
-                '"add6bb58e139be103324d04d82d8f545", "version": "7.0", '
-                '"architecture": "tempest-x86_64-948635295", "os": "linux"}}',
-                'request_headers': "{'Content-Type': 'application/json', "
-                "'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
-                'response_body': '{"agent": {"url": "xxx://xxxx/xxx/xxx", '
-                '"hypervisor": "common", "md5hash": '
-                '"add6bb58e139be103324d04d82d8f545", "version": "7.0", '
-                '"architecture": "tempest-x86_64-948635295", "os": "linux", '
-                '"agent_id": 3}}',
-                'response_headers': "{'status': '200', 'content-length': "
-                "'203', 'x-compute-request-id': "
-                "'req-ccd2116d-04b1-4ffe-ae32-fb623f68bf1c', 'vary': "
-                "'X-OpenStack-Nova-API-Version', 'connection': 'close', "
-                "'x-openstack-nova-api-version': '2.1', 'date': "
-                "'Tue, 02 Feb 2016 03:27:01 GMT', 'content-type': "
-                "'application/json'}",
-                'service': 'Nova',
-                'status_code': '200',
-                'url': 'v2.1/<id>/os-agents',
-                'verb': 'POST'}, {
-                'name': 'AgentsAdminTestJSON:test_delete_agent',
-                'request_body': 'None',
-                'request_headers': "{'Content-Type': 'application/json', "
-                "'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
-                'response_body': '',
-                'response_headers': "{'status': '200', 'content-length': "
-                "'0', 'x-compute-request-id': "
-                "'req-6e7fa28f-ae61-4388-9a78-947c58bc0588', 'vary': "
-                "'X-OpenStack-Nova-API-Version', 'connection': 'close', "
-                "'x-openstack-nova-api-version': '2.1', 'date': "
-                "'Tue, 02 Feb 2016 03:27:01 GMT', 'content-type': "
-                "'application/json'}",
-                'service': 'Nova',
-                'status_code': '200',
-                'url': 'v2.1/<id>/os-agents/3',
-                'verb': 'DELETE'}, {
-                'name': 'AgentsAdminTestJSON:test_delete_agent',
-                'request_body': 'None',
-                'request_headers': "{'Content-Type': 'application/json', "
-                "'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
-                'response_body': '{"agents": []}',
-                'response_headers': "{'status': '200', 'content-length': "
-                "'14', 'content-location': "
-                "'http://23.253.76.97:8774/v2.1/"
-                "cf6b1933fe5b476fbbabb876f6d1b924/os-agents', "
-                "'x-compute-request-id': "
-                "'req-e41aa9b4-41a6-4138-ae04-220b768eb644', 'vary': "
-                "'X-OpenStack-Nova-API-Version', 'connection': 'close', "
-                "'x-openstack-nova-api-version': '2.1', 'date': "
-                "'Tue, 02 Feb 2016 03:27:01 GMT', 'content-type': "
-                "'application/json'}",
-                'service': 'Nova',
-                'status_code': '200',
-                'url': 'v2.1/<id>/os-agents',
-                'verb': 'GET'}, {
-                'name': 'AgentsAdminTestJSON:tearDown',
-                'request_body': 'None',
-                'request_headers': "{'Content-Type': 'application/json', "
-                "'Accept': 'application/json', 'X-Auth-Token': '<omitted>'}",
-                'response_headers': "{'status': '404', 'content-length': "
-                "'82', 'x-compute-request-id': "
-                "'req-e297aeea-91cf-4f26-b49c-8f46b1b7a926', 'vary': "
-                "'X-OpenStack-Nova-API-Version', 'connection': 'close', "
-                "'x-openstack-nova-api-version': '2.1', 'date': "
-                "'Tue, 02 Feb 2016 03:27:02 GMT', 'content-type': "
-                "'application/json; charset=UTF-8'}",
-                'service': 'Nova',
-                'status_code': '404',
-                'url': 'v2.1/<id>/os-agents/3',
-                'verb': 'DELETE'}]}
+        with open(self._subunit_file, 'r') as read_file:
+            parser = subunit_describe_calls.parse(
+                read_file, "pythonlogging", None)
+        self._assert_expect_json(parser.test_logs)
 
-        self.assertEqual(expected_result, parser.test_logs)
+    def test_get_description(self):
+        self.assertEqual(subunit_describe_calls.DESCRIPTION,
+                         self.test_object.get_description())
+
+    def test_get_parser_default_min(self):
+        parser = self.test_object.get_parser('NAME')
+        parsed_args = parser.parse_args([])
+        self.assertIsNone(parsed_args.output_file)
+        self.assertIsNone(parsed_args.ports)
+        self.assertFalse(parsed_args.all_stdout)
+        self.assertEqual(parsed_args.subunit, sys.stdin)
+
+    def test_get_parser_default_max(self):
+        temp_dir = tempfile.mkdtemp(prefix="parser")
+        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
+        outfile_name = os.path.join(temp_dir, 'output.json')
+        open(outfile_name, 'a').close()
+        portfile_name = os.path.join(temp_dir, 'ports.json')
+        open(portfile_name, 'a').close()
+
+        parser = self.test_object.get_parser('NAME')
+        parsed_args = parser.parse_args(["-a", "-o " + outfile_name,
+                                         "-p " + portfile_name])
+
+        self.assertIsNotNone(parsed_args.output_file)
+        self.assertIsNotNone(parsed_args.ports)
+        self.assertTrue(parsed_args.all_stdout)
+        self.assertEqual(parsed_args.subunit, sys.stdin)
+
+    def test_take_action_min(self):
+        parser = self.test_object.get_parser('NAME')
+        parsed_args = parser.parse_args(["-s" + self._subunit_file],)
+        with patch('sys.stdout', new=StringIO()) as mock_stdout:
+            self.test_object.take_action(parsed_args)
+
+        stdout_data = mock_stdout.getvalue()
+        self._assert_methods_details(stdout_data)
+        self._assert_no_headers_and_bodies(stdout_data)
+
+    def test_take_action_all_stdout(self):
+        parser = self.test_object.get_parser('NAME')
+        parsed_args = parser.parse_args(["-as" + self._subunit_file],)
+        with patch('sys.stdout', new=StringIO()) as mock_stdout:
+            self.test_object.take_action(parsed_args)
+
+        stdout_data = mock_stdout.getvalue()
+        self._assert_methods_details(stdout_data)
+        self._assert_headers_and_bodies(stdout_data)
+
+    def test_take_action_outfile_files(self):
+        temp_file = tempfile.mkstemp()[1]
+        parser = self.test_object.get_parser('NAME')
+        parsed_args = parser.parse_args(
+            ["-as" + self._subunit_file, '-o', temp_file], )
+        with patch('sys.stdout', new=StringIO()) as mock_stdout:
+            self.test_object.take_action(parsed_args)
+        stdout_data = mock_stdout.getvalue()
+        self._assert_cli_message(stdout_data)
+        with open(temp_file, 'r') as file:
+            data = json.loads(file.read())
+        self._assert_expect_json(data)
+
+    def test_take_action_no_items(self):
+        temp_file = tempfile.mkstemp()[1]
+        parser = self.test_object.get_parser('NAME')
+        parsed_args = parser.parse_args(
+            ["-as" + temp_file], )
+        with patch('sys.stdout', new=StringIO()) as mock_stdout:
+            self.test_object.take_action(parsed_args)
+        stdout_data = mock_stdout.getvalue()
+        self._assert_cli_message(stdout_data)
+
+    def test_take_action_exception(self):
+        parser = self.test_object.get_parser('NAME')
+        parsed_args = parser.parse_args(["-s" + self._subunit_file],)
+        with patch('sys.stderr', new=StringIO()) as mock_stderr:
+            with patch('tempest.cmd.subunit_describe_calls.entry_point') \
+                    as mock_method:
+                mock_method.side_effect = OSError()
+                self.assertRaises(OSError, self.test_object.take_action,
+                                  parsed_args)
+                stderr_data = mock_stderr.getvalue()
+
+        self.assertIn("Traceback (most recent call last):", stderr_data)
+        self.assertIn("entry_point(parsed_args)", stderr_data)
diff --git a/tempest/tests/cmd/test_tempest_init.py b/tempest/tests/cmd/test_tempest_init.py
index 9042b12..fce0882 100644
--- a/tempest/tests/cmd/test_tempest_init.py
+++ b/tempest/tests/cmd/test_tempest_init.py
@@ -40,7 +40,7 @@
 
     def test_generate_sample_config(self):
         local_dir = self.useFixture(fixtures.TempDir())
-        etc_dir_path = os.path.join(local_dir.path, 'etc/')
+        etc_dir_path = os.path.join(local_dir.path, 'etc')
         os.mkdir(etc_dir_path)
         init_cmd = init.TempestInit(None, None)
         local_sample_conf_file = os.path.join(etc_dir_path,
@@ -56,7 +56,7 @@
 
     def test_update_local_conf(self):
         local_dir = self.useFixture(fixtures.TempDir())
-        etc_dir_path = os.path.join(local_dir.path, 'etc/')
+        etc_dir_path = os.path.join(local_dir.path, 'etc')
         os.mkdir(etc_dir_path)
         lock_dir = os.path.join(local_dir.path, 'tempest_lock')
         config_path = os.path.join(etc_dir_path, 'tempest.conf')
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 8dbba38..a8a4c0f 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -13,17 +13,14 @@
 #    under the License.
 
 import os
+from unittest import mock
 
 import fixtures
-import mock
 from oslo_serialization import jsonutils as json
 
-from tempest import clients
 from tempest.cmd import init
 from tempest.cmd import verify_tempest_config
-from tempest.common import credentials_factory
 from tempest import config
-from tempest.lib.common import rest_client
 from tempest.lib.common.utils import data_utils
 from tempest.lib import exceptions as lib_exc
 from tempest.tests import base
@@ -97,15 +94,15 @@
         self.useFixture(fixtures.MockPatchObject(
             verify_tempest_config, '_get_unversioned_endpoint',
             return_value='http://fake_endpoint:5000'))
-        fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
+        fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
         fake_resp = json.dumps(fake_resp)
         self.useFixture(fixtures.MockPatch(
             'tempest.lib.common.http.ClosingHttp.request',
             return_value=(None, fake_resp)))
         fake_os = mock.MagicMock()
         versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
-        self.assertIn('v1.0', versions)
         self.assertIn('v2.0', versions)
+        self.assertIn('v3.0', versions)
 
     def test_get_nova_versions(self):
         self.useFixture(fixtures.MockPatchObject(
@@ -145,7 +142,7 @@
         self.assertTrue(mock_log_error.called)
 
     def test_verify_api_versions(self):
-        api_services = ['cinder', 'glance', 'keystone']
+        api_services = ['glance', 'keystone']
         fake_os = mock.MagicMock()
         for svc in api_services:
             m = 'verify_%s_api_versions' % svc
@@ -154,7 +151,7 @@
                 verify_mock.assert_called_once_with(fake_os, True)
 
     def test_verify_api_versions_not_implemented(self):
-        api_services = ['cinder', 'glance', 'keystone']
+        api_services = ['glance', 'keystone']
         fake_os = mock.MagicMock()
         for svc in api_services:
             m = 'verify_%s_api_versions' % svc
@@ -178,52 +175,6 @@
                                            'identity-feature-enabled',
                                            False, True)
 
-    @mock.patch('tempest.lib.common.http.ClosingHttp.request')
-    def test_verify_cinder_api_versions_no_v3(self, mock_request):
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, '_get_unversioned_endpoint',
-            return_value='http://fake_endpoint:5000'))
-        fake_resp = {'versions': [{'id': 'v2.0'}]}
-        fake_resp = json.dumps(fake_resp)
-        mock_request.return_value = (None, fake_resp)
-        fake_os = mock.MagicMock()
-        with mock.patch.object(verify_tempest_config,
-                               'print_and_or_update') as print_mock:
-            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
-        print_mock.assert_any_call('api_v3', 'volume-feature-enabled',
-                                   False, True)
-        self.assertEqual(1, print_mock.call_count)
-
-    @mock.patch('tempest.lib.common.http.ClosingHttp.request')
-    def test_verify_cinder_api_versions_no_v2(self, mock_request):
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, '_get_unversioned_endpoint',
-            return_value='http://fake_endpoint:5000'))
-        fake_resp = {'versions': [{'id': 'v3.0'}]}
-        fake_resp = json.dumps(fake_resp)
-        mock_request.return_value = (None, fake_resp)
-        fake_os = mock.MagicMock()
-        with mock.patch.object(verify_tempest_config,
-                               'print_and_or_update') as print_mock:
-            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
-        print_mock.assert_any_call('api_v2', 'volume-feature-enabled',
-                                   False, True)
-        self.assertEqual(1, print_mock.call_count)
-
-    @mock.patch('tempest.lib.common.http.ClosingHttp.request')
-    def test_verify_cinder_api_versions_no_v1(self, mock_request):
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, '_get_unversioned_endpoint',
-            return_value='http://fake_endpoint:5000'))
-        fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
-        fake_resp = json.dumps(fake_resp)
-        mock_request.return_value = (None, fake_resp)
-        fake_os = mock.MagicMock()
-        with mock.patch.object(verify_tempest_config,
-                               'print_and_or_update') as print_mock:
-            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
-        print_mock.assert_not_called()
-
     def test_verify_glance_version_no_v2_with_v1_1(self):
         # This test verifies that wrong config api_v2 = True is detected
         class FakeClient(object):
@@ -560,26 +511,27 @@
         self.assertEqual([], results['swift']['extensions'])
 
     def test_get_extension_client(self):
-        creds = credentials_factory.get_credentials(
-            fill_in=False, username='fake_user', project_name='fake_project',
-            password='fake_password')
-        os = clients.Manager(creds)
-        for service in ['nova', 'neutron', 'swift', 'cinder']:
+        fake_os = mock.MagicMock()
+        services = {
+            'nova': fake_os.compute.ExtensionsClient(),
+            'neutron': fake_os.network.ExtensionsClient(),
+            'swift': fake_os.object_storage.CapabilitiesClient(),
+            'cinder': fake_os.volume_v2.ExtensionsClient(),
+        }
+        for service in services.keys():
             extensions_client = verify_tempest_config.get_extension_client(
-                os, service)
-            self.assertIsInstance(extensions_client, rest_client.RestClient)
+                fake_os, service)
+            self.assertIsInstance(extensions_client, mock.MagicMock)
+            self.assertEqual(extensions_client, services[service])
 
     def test_get_extension_client_sysexit(self):
-        creds = credentials_factory.get_credentials(
-            fill_in=False, username='fake_user', project_name='fake_project',
-            password='fake_password')
-        os = clients.Manager(creds)
+        fake_os = mock.MagicMock()
         self.assertRaises(SystemExit,
                           verify_tempest_config.get_extension_client,
-                          os, 'fakeservice')
+                          fake_os, 'fakeservice')
 
     def test_get_config_file(self):
-        conf_dir = os.path.join(os.getcwd(), 'etc/')
+        conf_dir = os.path.join(os.getcwd(), 'etc')
         conf_file = "tempest.conf.sample"
         local_sample_conf_file = os.path.join(conf_dir, conf_file)
 
@@ -629,3 +581,23 @@
     def test_contains_version_negative_data(self):
         self.assertFalse(
             verify_tempest_config.contains_version('v5.', ['v1.0', 'v2.0']))
+
+    def test_check_service_availability(self):
+        class FakeAuthProvider:
+            def get_auth(self):
+                return ('token',
+                        {'serviceCatalog': [{'type': 'compute'},
+                                            {'type': 'image'},
+                                            {'type': 'volumev3'},
+                                            {'type': 'network'},
+                                            {'type': 'object-store'}]})
+
+        class Fake_os:
+            auth_provider = FakeAuthProvider()
+            auth_version = 'v2'
+        verify_tempest_config.CONF._config = fake_config.FakePrivate()
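+        # Each service type in the fake catalog above should map back to
+        # the corresponding service name.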
+        services = verify_tempest_config.check_service_availability(
+            Fake_os(), True)
+        self.assertEqual(
+            sorted(['nova', 'glance', 'neutron', 'swift', 'cinder']),
+            sorted(services))
diff --git a/tempest/tests/cmd/test_workspace.py b/tempest/tests/cmd/test_workspace.py
index 7a6b576..f16d533 100644
--- a/tempest/tests/cmd/test_workspace.py
+++ b/tempest/tests/cmd/test_workspace.py
@@ -12,16 +12,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from io import StringIO
 import os
 import shutil
 import subprocess
 import tempfile
+from unittest.mock import patch
 
-from mock import patch
-try:
-    from StringIO import StringIO
-except ImportError:
-    from io import StringIO
 from tempest.cmd import workspace
 from tempest.lib.common.utils import data_utils
 from tempest.tests import base
diff --git a/tempest/tests/common/test_compute.py b/tempest/tests/common/test_compute.py
index c108be9..142bb08 100644
--- a/tempest/tests/common/test_compute.py
+++ b/tempest/tests/common/test_compute.py
@@ -13,9 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from six.moves.urllib import parse as urlparse
+from unittest import mock
 
-import mock
+from urllib import parse as urlparse
+
 
 from tempest.common import compute
 from tempest.tests import base
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
index 7cf87f8..374474d 100644
--- a/tempest/tests/common/test_credentials_factory.py
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -13,7 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
+
 from oslo_config import cfg
 import testtools
 
@@ -172,10 +173,15 @@
     @mock.patch.object(cf, 'get_credentials')
     def test_get_configured_admin_credentials(self, mock_get_credentials):
         cfg.CONF.set_default('auth_version', 'v3', 'identity')
-        all_params = [('admin_username', 'username', 'my_name'),
-                      ('admin_password', 'password', 'secret'),
-                      ('admin_project_name', 'project_name', 'my_pname'),
-                      ('admin_domain_name', 'domain_name', 'my_dname')]
+        all_params = [
+            ('admin_username', 'username', 'my_name'),
+            ('admin_user_domain_name', 'user_domain_name', 'my_dname'),
+            ('admin_password', 'password', 'secret'),
+            ('admin_project_name', 'project_name', 'my_pname'),
+            ('admin_project_domain_name', 'project_domain_name', 'my_dname'),
+            ('admin_domain_name', 'domain_name', 'my_dname'),
+            ('admin_system', 'system', None),
+        ]
         expected_result = 'my_admin_credentials'
         mock_get_credentials.return_value = expected_result
         for config_item, _, value in all_params:
@@ -193,10 +199,15 @@
     def test_get_configured_admin_credentials_not_fill_valid(
             self, mock_get_credentials):
         cfg.CONF.set_default('auth_version', 'v2', 'identity')
-        all_params = [('admin_username', 'username', 'my_name'),
-                      ('admin_password', 'password', 'secret'),
-                      ('admin_project_name', 'project_name', 'my_pname'),
-                      ('admin_domain_name', 'domain_name', 'my_dname')]
+        all_params = [
+            ('admin_username', 'username', 'my_name'),
+            ('admin_user_domain_name', 'user_domain_name', 'my_dname'),
+            ('admin_password', 'password', 'secret'),
+            ('admin_project_domain_name', 'project_domain_name', 'my_dname'),
+            ('admin_project_name', 'project_name', 'my_pname'),
+            ('admin_domain_name', 'domain_name', 'my_dname'),
+            ('admin_system', 'system', None),
+        ]
         expected_result = mock.Mock()
         expected_result.is_valid.return_value = True
         mock_get_credentials.return_value = expected_result
@@ -277,3 +288,20 @@
         mock_auth_get_credentials.assert_called_once_with(
             expected_uri, fill_in=False, identity_version='v3',
             **expected_params)
+
+    @mock.patch('tempest.lib.auth.get_credentials')
+    def test_get_credentials_v3_system(self, mock_auth_get_credentials):
+        expected_uri = 'V3_URI'
+        expected_result = 'my_creds'
+        mock_auth_get_credentials.return_value = expected_result
+        cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
+        cfg.CONF.set_default('admin_system', 'all', 'auth')
+        params = {'system': 'all'}
+        expected_params = params.copy()
+        expected_params.update(config.service_client_config())
+        result = cf.get_credentials(fill_in=False, identity_version='v3',
+                                    **params)
+        self.assertEqual(expected_result, result)
+        mock_auth_get_credentials.assert_called_once_with(
+            expected_uri, fill_in=False, identity_version='v3',
+            **expected_params)
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index e3bb836..b76a263 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -13,13 +13,14 @@
 #    under the License.
 
 import time
+from unittest import mock
 
-import mock
 from oslo_utils.fixture import uuidsentinel as uuids
 
 from tempest.common import waiters
 from tempest import exceptions
 from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.compute import servers_client
 from tempest.lib.services.volume.v2 import volumes_client
 from tempest.tests import base
 import tempest.tests.utils as utils
@@ -55,6 +56,93 @@
                           waiters.wait_for_image_status,
                           self.client, 'fake_image_id', 'active')
 
+    def test_wait_for_image_imported_to_stores(self):
+        self.client.show_image.return_value = ({'status': 'active',
+                                                'stores': 'fake_store'})
+        start_time = int(time.time())
+        waiters.wait_for_image_imported_to_stores(
+            self.client, 'fake_image_id', 'fake_store')
+        end_time = int(time.time())
+        # Ensure waiter returns before build_timeout
+        self.assertLess((end_time - start_time), 10)
+
+    def test_wait_for_image_imported_to_stores_failure(self):
+        time_mock = self.patch('time.time')
+        client = mock.MagicMock()
+        client.build_timeout = 2
+        self.patch('time.time', side_effect=[0., 1., 2.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        client.show_image.return_value = ({
+            'status': 'saving',
+            'stores': 'fake_store',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        self.assertRaises(lib_exc.OtherRestClientException,
+                          waiters.wait_for_image_imported_to_stores,
+                          client, 'fake_image_id', 'fake_store')
+
+    def test_wait_for_image_imported_to_stores_timeout(self):
+        time_mock = self.patch('time.time')
+        client = mock.MagicMock()
+        client.build_timeout = 2
+        self.patch('time.time', side_effect=[0., 1., 2.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        client.show_image.return_value = ({
+            'status': 'saving',
+            'stores': 'fake_store'})
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_image_imported_to_stores,
+                          client, 'fake_image_id', 'fake_store')
+
+    def test_wait_for_image_copied_to_stores(self):
+        self.client.show_image.return_value = ({
+            'status': 'active',
+            'os_glance_importing_to_stores': '',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        start_time = int(time.time())
+        waiters.wait_for_image_copied_to_stores(
+            self.client, 'fake_image_id')
+        end_time = int(time.time())
+        # Ensure waiter returns before build_timeout
+        self.assertLess((end_time - start_time), 10)
+
+    def test_wait_for_image_copied_to_stores_timeout(self):
+        time_mock = self.patch('time.time')
+        self.patch('time.time', side_effect=[0., 1.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        self.client.show_image.return_value = ({
+            'status': 'active',
+            'os_glance_importing_to_stores': 'processing',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_image_copied_to_stores,
+                          self.client, 'fake_image_id')
+
+    def test_wait_for_image_tasks_status(self):
+        self.client.show_image_tasks.return_value = ({
+            'tasks': [{'status': 'success'}]})
+        start_time = int(time.time())
+        waiters.wait_for_image_tasks_status(
+            self.client, 'fake_image_id', 'success')
+        end_time = int(time.time())
+        # Ensure waiter returns before build_timeout
+        self.assertLess((end_time - start_time), 10)
+
+    def test_wait_for_image_tasks_status_timeout(self):
+        time_mock = self.patch('time.time')
+        self.patch('time.time', side_effect=[0., 1.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        self.client.show_image_tasks.return_value = ({
+            'tasks': [
+                {'status': 'success'},
+                {'status': 'processing'}]})
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_image_tasks_status,
+                          self.client, 'fake_image_id', 'success')
+
 
 class TestInterfaceWaiters(base.TestCase):
 
@@ -98,39 +186,126 @@
                                          mock.call('server_id', 'port_id')])
         sleep.assert_called_once_with(client.build_interval)
 
-    one_interface = {'interfaceAttachments': [{'port_id': 'port_one'}]}
-    two_interfaces = {'interfaceAttachments': [{'port_id': 'port_one'},
-                                               {'port_id': 'port_two'}]}
-
     def test_wait_for_interface_detach(self):
-        list_interfaces = mock.MagicMock(
-            side_effect=[self.two_interfaces, self.one_interface])
-        client = self.mock_client(list_interfaces=list_interfaces)
+        no_event = {
+            'instanceAction': {
+                'events': []
+            }
+        }
+        one_event_without_result = {
+            'instanceAction': {
+                'events': [
+                    {
+                        'event': 'compute_detach_interface',
+                        'result': None
+                    }
+
+                ]
+            }
+        }
+        one_event_successful = {
+            'instanceAction': {
+                'events': [
+                    {
+                        'event': 'compute_detach_interface',
+                        'result': 'Success'
+                    }
+                ]
+            }
+        }
+
+        show_instance_action = mock.MagicMock(
+            # there is an extra call to return the result from the waiter
+            side_effect=[
+                no_event,
+                one_event_without_result,
+                one_event_successful,
+                one_event_successful,
+            ]
+        )
+        client = self.mock_client(show_instance_action=show_instance_action)
         self.patch('time.time', return_value=0.)
         sleep = self.patch('time.sleep')
 
         result = waiters.wait_for_interface_detach(
-            client, 'server_id', 'port_two')
+            client, mock.sentinel.server_id, mock.sentinel.port_id,
+            mock.sentinel.detach_request_id
+        )
 
-        self.assertIs(self.one_interface['interfaceAttachments'], result)
-        list_interfaces.assert_has_calls([mock.call('server_id'),
-                                          mock.call('server_id')])
-        sleep.assert_called_once_with(client.build_interval)
+        self.assertIs(one_event_successful['instanceAction'], result)
+        show_instance_action.assert_has_calls(
+            # there is an extra call to return the result from the waiter
+            [
+                mock.call(
+                    mock.sentinel.server_id, mock.sentinel.detach_request_id)
+            ] * 4
+        )
+        sleep.assert_has_calls([mock.call(client.build_interval)] * 2)
 
     def test_wait_for_interface_detach_timeout(self):
-        list_interfaces = mock.MagicMock(return_value=self.one_interface)
-        client = self.mock_client(list_interfaces=list_interfaces)
+        one_event_without_result = {
+            'instanceAction': {
+                'events': [
+                    {
+                        'event': 'compute_detach_interface',
+                        'result': None
+                    }
+
+                ]
+            }
+        }
+
+        show_instance_action = mock.MagicMock(
+            return_value=one_event_without_result)
+        client = self.mock_client(show_instance_action=show_instance_action)
         self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
         sleep = self.patch('time.sleep')
 
-        self.assertRaises(lib_exc.TimeoutException,
-                          waiters.wait_for_interface_detach,
-                          client, 'server_id', 'port_one')
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_interface_detach,
+            client, mock.sentinel.server_id, mock.sentinel.port_id,
+            mock.sentinel.detach_request_id
+        )
 
-        list_interfaces.assert_has_calls([mock.call('server_id'),
-                                          mock.call('server_id')])
+        show_instance_action.assert_has_calls(
+            [
+                mock.call(
+                    mock.sentinel.server_id, mock.sentinel.detach_request_id)
+            ] * 2
+        )
         sleep.assert_called_once_with(client.build_interval)
 
+    def test_wait_for_guest_os_boot(self):
+        get_console_output = mock.Mock(
+            side_effect=[
+                {'output': 'os not ready yet\n'},
+                {'output': 'login:\n'}
+            ])
+        client = self.mock_client(get_console_output=get_console_output)
+        self.patch('time.time', return_value=0.)
+        sleep = self.patch('time.sleep')
+
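+        # The login prompt appears on the second console poll, so the
+        # waiter is expected to return without logging anything.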
+        with mock.patch.object(waiters.LOG, "info") as log_info:
+            waiters.wait_for_guest_os_boot(client, 'server_id')
+
+        get_console_output.assert_has_calls([
+            mock.call('server_id'), mock.call('server_id')])
+        sleep.assert_called_once_with(client.build_interval)
+        log_info.assert_not_called()
+
+    def test_wait_for_guest_os_boot_timeout(self):
+        get_console_output = mock.Mock(
+            return_value={'output': 'os not ready yet\n'})
+        client = self.mock_client(get_console_output=get_console_output)
+        self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+        self.patch('time.sleep')
+
+        with mock.patch.object(waiters.LOG, "info") as log_info:
+            waiters.wait_for_guest_os_boot(client, 'server_id')
+
+        log_info.assert_called_once()
+
 
 class TestVolumeWaiters(base.TestCase):
     vol_migrating_src_host = {
@@ -234,6 +409,29 @@
                                     mock.call(volume_id)])
         mock_sleep.assert_called_once_with(1)
 
+    def test_wait_for_volume_attachment_create(self):
+        vol_detached = {'volume': {'attachments': []}}
+        vol_attached = {'volume': {'attachments': [
+                       {'id': uuids.volume_id,
+                        'attachment_id': uuids.attachment_id,
+                        'server_id': uuids.server_id,
+                        'volume_id': uuids.volume_id}]}}
+        show_volume = mock.MagicMock(side_effect=[
+            vol_detached, vol_detached, vol_attached])
+        client = mock.Mock(spec=volumes_client.VolumesClient,
+                           build_interval=1,
+                           build_timeout=5,
+                           show_volume=show_volume)
+        self.patch('time.time')
+        self.patch('time.sleep')
+        att = waiters.wait_for_volume_attachment_create(
+            client, uuids.volume_id, uuids.server_id)
+        self.assertEqual(att, vol_attached['volume']['attachments'][0])
+        # Assert that show volume is called until the attachment is created.
+        show_volume.assert_has_calls([mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id)])
+
     def test_wait_for_volume_attachment(self):
         vol_detached = {'volume': {'attachments': []}}
         vol_attached = {'volume': {'attachments': [
@@ -249,9 +447,9 @@
         waiters.wait_for_volume_attachment_remove(client, uuids.volume_id,
                                                   uuids.attachment_id)
         # Assert that show volume is called until the attachment is removed.
-        show_volume.assert_has_calls = [mock.call(uuids.volume_id),
-                                        mock.call(uuids.volume_id),
-                                        mock.call(uuids.volume_id)]
+        show_volume.assert_has_calls([mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id)])
 
     def test_wait_for_volume_attachment_timeout(self):
         show_volume = mock.MagicMock(return_value={
@@ -281,3 +479,110 @@
                                                   uuids.attachment_id)
         # Assert that show volume is only called once before we return
         show_volume.assert_called_once_with(uuids.volume_id)
+
+    def test_wait_for_volume_attachment_remove_from_server(self):
+        volume_attached = {
+            "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+        volume_not_attached = {"volumeAttachments": []}
+        mock_list_volume_attachments = mock.Mock(
+            side_effect=[volume_attached, volume_not_attached])
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_interval=1,
+            build_timeout=1,
+            list_volume_attachments=mock_list_volume_attachments)
+        self.patch(
+            'time.time',
+            side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+        self.patch('time.sleep')
+
+        waiters.wait_for_volume_attachment_remove_from_server(
+            mock_client, uuids.server_id, uuids.volume_id)
+
+        # Assert that list_volume_attachments is called until the
+        # attachment is removed.
+        mock_list_volume_attachments.assert_has_calls([
+            mock.call(uuids.server_id),
+            mock.call(uuids.server_id)])
+
+    def test_wait_for_volume_attachment_remove_from_server_timeout(self):
+        volume_attached = {
+            "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+        mock_list_volume_attachments = mock.Mock(
+            side_effect=[volume_attached, volume_attached])
+        mock_get_console_output = mock.Mock(
+            return_value={'output': 'output'})
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_interval=1,
+            build_timeout=1,
+            list_volume_attachments=mock_list_volume_attachments,
+            get_console_output=mock_get_console_output)
+        self.patch(
+            'time.time',
+            side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+        self.patch('time.sleep')
+
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_volume_attachment_remove_from_server,
+            mock_client, uuids.server_id, uuids.volume_id)
+
+        # Assert that list_volume_attachments is called until the
+        # attachment is removed.
+        mock_list_volume_attachments.assert_has_calls([
+            mock.call(uuids.server_id),
+            mock.call(uuids.server_id)])
+
+        # Assert that we fetch console output
+        mock_get_console_output.assert_called_once_with(uuids.server_id)
+
+    def test_wait_for_volume_attachment_remove_from_server_not_found(self):
+        mock_list_volume_attachments = mock.Mock(
+            side_effect=lib_exc.NotFound)
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            list_volume_attachments=mock_list_volume_attachments)
+
+        # Assert that nothing is raised when lib_exc.NotFound is raised
+        # by the client call to list_volume_attachments
+        waiters.wait_for_volume_attachment_remove_from_server(
+            mock_client, mock.sentinel.server_id, mock.sentinel.volume_id)
+
+        # Assert that list_volume_attachments was actually called
+        mock_list_volume_attachments.assert_called_once_with(
+            mock.sentinel.server_id)
+
+
+class TestServerFloatingIPWaiters(base.TestCase):
+
+    def test_wait_for_server_floating_ip_associate_timeout(self):
+        mock_server = {'server': {'id': 'fake_uuid', 'addresses': {}}}
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_timeout=1, build_interval=1,
+            show_server=lambda id: mock_server)
+
+        fake_server = {'id': 'fake-uuid'}
+        fake_fip = {'floating_ip_address': 'fake_address'}
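+        # The fake server never reports the floating IP in its addresses,
+        # so the waiter is expected to time out.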
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_server_floating_ip, mock_client, fake_server,
+            fake_fip)
+
+    def test_wait_for_server_floating_ip_disassociate_timeout(self):
+        mock_addresses = {'shared': [{'OS-EXT-IPS:type': 'floating',
+                                      'addr': 'fake_address'}]}
+        mock_server = {'server': {'id': 'fake_uuid',
+                                  'addresses': mock_addresses}}
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_timeout=1, build_interval=1,
+            show_server=lambda id: mock_server)
+
+        fake_server = {'id': 'fake-uuid'}
+        fake_fip = {'floating_ip_address': 'fake_address'}
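+        # The floating IP never leaves the server's addresses, so waiting
+        # for disassociation is expected to time out.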
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_server_floating_ip, mock_client, fake_server,
+            fake_fip, wait_for_disassociate=True)
diff --git a/tempest/tests/common/utils/test_net_utils.py b/tempest/tests/common/utils/test_net_utils.py
index 83c6bcc..51d86d1 100644
--- a/tempest/tests/common/utils/test_net_utils.py
+++ b/tempest/tests/common/utils/test_net_utils.py
@@ -10,7 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
 
 from tempest.common.utils import net_utils
 from tempest.lib import exceptions as lib_exc
diff --git a/tempest/tests/lib/cli/test_execute.py b/tempest/tests/lib/cli/test_execute.py
index c069af5..a10e3bb 100644
--- a/tempest/tests/lib/cli/test_execute.py
+++ b/tempest/tests/lib/cli/test_execute.py
@@ -12,8 +12,8 @@
 #    under the License.
 
 import subprocess
+from unittest import mock
 
-import mock
 
 from tempest.lib.cli import base as cli_base
 from tempest.lib import exceptions
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/tests/lib/cmd/__init__.py
similarity index 100%
rename from tempest/tests/lib/services/volume/v1/__init__.py
rename to tempest/tests/lib/cmd/__init__.py
diff --git a/tempest/tests/lib/cmd/test_check_uuid.py b/tempest/tests/lib/cmd/test_check_uuid.py
new file mode 100644
index 0000000..edfb2c8
--- /dev/null
+++ b/tempest/tests/lib/cmd/test_check_uuid.py
@@ -0,0 +1,194 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ast
+import importlib
+import os
+import shutil
+import sys
+import tempfile
+from unittest import mock
+
+from tempest.lib.cmd import check_uuid
+from tempest.tests import base
+
+
+class TestCLInterface(base.TestCase):
+    CODE = "import unittest\n" \
+           "class TestClass(unittest.TestCase):\n" \
+           "    def test_tests(self):\n" \
+           "        pass"
+
+    def setUp(self):
+        super(TestCLInterface, self).setUp()
+        self.directory = tempfile.mkdtemp(prefix='check-uuid', dir=".")
+        self.addCleanup(shutil.rmtree, self.directory, ignore_errors=True)
+
+        init_file = open(self.directory + "/__init__.py", "w")
+        init_file.close()
+
+        self.tests_file = self.directory + "/tests.py"
+        with open(self.tests_file, "w") as fake_file:
+            fake_file.write(TestCLInterface.CODE)
+            fake_file.close()
+
+    def test_fix_argument_no(self):
+        sys.argv = [sys.argv[0]] + ["--package",
+                                    os.path.relpath(self.directory)]
+
+        self.assertRaises(SystemExit, check_uuid.run)
+        with open(self.tests_file, "r") as f:
+            self.assertEqual(TestCLInterface.CODE, f.read())
+
+    def test_fix_argument_yes(self):
+
+        sys.argv = [sys.argv[0]] + ["--fix", "--package",
+                                    os.path.relpath(self.directory)]
+        check_uuid.run()
+        with open(self.tests_file, "r") as f:
+            self.assertNotEqual(TestCLInterface.CODE, f.read())
+
+
+class TestSourcePatcher(base.TestCase):
+    def test_add_patch(self):
+        patcher = check_uuid.SourcePatcher()
+        fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+        file_contents = 'first_line\nsecond_line'
+        fake_file.write(file_contents)
+        fake_file.close()
+        patcher.add_patch(fake_file.name, 'patch', 2)
+
+        source_file = patcher.source_files[fake_file.name]
+        self.assertEqual(1, len(patcher.patches))
+        (patch_id, patch), = patcher.patches.items()
+        self.assertEqual(patcher._quote('patch\n'), patch)
+        self.assertEqual('first_line\n{%s:s}second_line' % patch_id,
+                         patcher._unquote(source_file))
+
+    def test_apply_patches(self):
+        fake_file = tempfile.NamedTemporaryFile("w+t")
+        patcher = check_uuid.SourcePatcher()
+        patcher.patches = {'fake-uuid': patcher._quote('patch\n')}
+        patcher.source_files = {
+            fake_file.name: patcher._quote('first_line\n') +
+            '{fake-uuid:s}second_line'}
+        with mock.patch('sys.stdout'):
+            patcher.apply_patches()
+
+        lines = fake_file.read().split('\n')
+        fake_file.close()
+        self.assertEqual(['first_line', 'patch', 'second_line'], lines)
+        self.assertFalse(patcher.patches)
+        self.assertFalse(patcher.source_files)
+
+
+class TestTestChecker(base.TestCase):
+    IMPORT_LINE = "from tempest.lib import decorators\n"
+
+    def _test_add_uuid_to_test(self, source_file):
+        class Fake_test_node():
+            lineno = 1
+            col_offset = 4
+        patcher = check_uuid.SourcePatcher()
+        checker = check_uuid.TestChecker(importlib.import_module('tempest'))
+        fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+        fake_file.write(source_file)
+        fake_file.close()
+        checker._add_uuid_to_test(patcher, Fake_test_node(), fake_file.name)
+
+        self.assertEqual(1, len(patcher.patches))
+        self.assertEqual(1, len(patcher.source_files))
+        (patch_id, patch), = patcher.patches.items()
+        changed_source_file, = patcher.source_files.values()
+        self.assertEqual('{%s:s}%s' % (patch_id, patcher._quote(source_file)),
+                         changed_source_file)
+        expected_patch_start = patcher._quote(
+            '    ' + check_uuid.DECORATOR_TEMPLATE.split('(')[0])
+        self.assertTrue(patch.startswith(expected_patch_start))
+
+    def test_add_uuid_to_test_def(self):
+        source_file = ("    def test_test():\n"
+                       "        pass")
+        self._test_add_uuid_to_test(source_file)
+
+    def test_add_uuid_to_test_decorator(self):
+        source_file = ("    @decorators.idempotent_id\n"
+                       "    def test_test():\n"
+                       "        pass")
+        self._test_add_uuid_to_test(source_file)
+
+    @staticmethod
+    def get_mocked_ast_object(lineno, col_offset, module, name, object_type):
+        ast_object = mock.Mock(spec=object_type)
+        name_obj = mock.Mock()
+        ast_object.lineno = lineno
+        ast_object.col_offset = col_offset
+        name_obj.name = name
+        ast_object.module = module
+        ast_object.names = [name_obj]
+
+        return ast_object
+
+    def test_add_import_for_test_uuid_no_tempest(self):
+        patcher = check_uuid.SourcePatcher()
+        checker = check_uuid.TestChecker(importlib.import_module('tempest'))
+        fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+        source_code = "from unittest import mock\n"
+        fake_file.write(source_code)
+        fake_file.close()
+
+        class Fake_src_parsed():
+            body = [TestTestChecker.get_mocked_ast_object(
+                1, 4, 'unittest', 'mock', ast.ImportFrom)]
+
+        checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
+                                          fake_file.name)
+        patcher.apply_patches()
+
+        with open(fake_file.name, "r") as f:
+            expected_result = source_code + '\n' + TestTestChecker.IMPORT_LINE
+            self.assertEqual(expected_result, f.read())
+
+    def test_add_import_for_test_uuid_tempest(self):
+        patcher = check_uuid.SourcePatcher()
+        checker = check_uuid.TestChecker(importlib.import_module('tempest'))
+        fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+        source_code = "from tempest import a_fake_module\n"
+        fake_file.write(source_code)
+        fake_file.close()
+
+        class Fake_src_parsed:
+            body = [TestTestChecker.get_mocked_ast_object(
+                1, 4, 'tempest', 'a_fake_module', ast.ImportFrom)]
+
+        checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
+                                          fake_file.name)
+        patcher.apply_patches()
+
+        with open(fake_file.name, "r") as f:
+            expected_result = source_code + TestTestChecker.IMPORT_LINE
+            self.assertEqual(expected_result, f.read())
+
+    def test_add_import_no_import(self):
+        patcher = check_uuid.SourcePatcher()
+        patcher.add_patch = mock.Mock()
+        checker = check_uuid.TestChecker(importlib.import_module('tempest'))
+        fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+        fake_file.close()
+
+        class Fake_src_parsed:
+            body = []
+
+        checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
+                                          fake_file.name)
+
+        self.assertFalse(patcher.add_patch.called)
diff --git a/tempest/tests/lib/common/test_api_version_utils.py b/tempest/tests/lib/common/test_api_version_utils.py
index b99e8d4..8d5de09 100644
--- a/tempest/tests/lib/common/test_api_version_utils.py
+++ b/tempest/tests/lib/common/test_api_version_utils.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
 import testtools
 
 from tempest.lib.common import api_version_utils
@@ -31,7 +30,7 @@
                                                            cfg_max_version)
         except testtools.TestCase.skipException as e:
             if not expected_skip:
-                raise testtools.TestCase.failureException(six.text_type(e))
+                raise testtools.TestCase.failureException(str(e))
 
     def test_version_min_in_range(self):
         self._test_version('2.2', '2.10', '2.1', '2.7')
diff --git a/tempest/tests/lib/common/test_cred_client.py b/tempest/tests/lib/common/test_cred_client.py
index 3dff16f..7ea660b 100644
--- a/tempest/tests/lib/common/test_cred_client.py
+++ b/tempest/tests/lib/common/test_cred_client.py
@@ -11,7 +11,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import mock
+from unittest import mock
 
 from tempest.lib.common import cred_client
 from tempest.tests import base
@@ -43,6 +43,14 @@
         self.projects_client.delete_tenant.assert_called_once_with(
             'fake_id')
 
+    def test_get_credentials(self):
+        ret = self.creds_client.get_credentials(
+            {'name': 'some_user', 'id': 'fake_id'},
+            {'name': 'some_project', 'id': 'fake_id'},
+            'password123')
+        self.assertEqual(ret.username, 'some_user')
+        self.assertEqual(ret.project_name, 'some_project')
+
 
 class TestCredClientV3(base.TestCase):
     def setUp(self):
@@ -53,7 +61,7 @@
         self.roles_client = mock.MagicMock()
         self.domains_client = mock.MagicMock()
         self.domains_client.list_domains.return_value = {
-            'domains': [{'id': 'fake_domain_id'}]
+            'domains': [{'id': 'fake_domain_id', 'name': 'some_domain'}]
         }
         self.creds_client = cred_client.V3CredsClient(self.identity_client,
                                                       self.projects_client,
@@ -75,3 +83,49 @@
         self.creds_client.delete_project('fake_id')
         self.projects_client.delete_project.assert_called_once_with(
             'fake_id')
+
+    def test_get_credentials(self):
+        ret = self.creds_client.get_credentials(
+            {'name': 'some_user', 'id': 'fake_id'},
+            {'name': 'some_project', 'id': 'fake_id'},
+            'password123')
+        self.assertEqual(ret.username, 'some_user')
+        self.assertEqual(ret.project_name, 'some_project')
+        self.assertIsNone(ret.system)
+        self.assertEqual(ret.domain_name, 'some_domain')
+        ret = self.creds_client.get_credentials(
+            {'name': 'some_user', 'id': 'fake_id'},
+            None,
+            'password123',
+            domain={'name': 'another_domain', 'id': 'another_id'})
+        self.assertEqual(ret.username, 'some_user')
+        self.assertIsNone(ret.project_name)
+        self.assertIsNone(ret.system)
+        self.assertEqual(ret.domain_name, 'another_domain')
+        ret = self.creds_client.get_credentials(
+            {'name': 'some_user', 'id': 'fake_id'},
+            None,
+            'password123',
+            system={'system': 'all'})
+        self.assertEqual(ret.username, 'some_user')
+        self.assertIsNone(ret.project_name)
+        self.assertEqual(ret.system, {'system': 'all'})
+        self.assertEqual(ret.domain_name, 'some_domain')
+
+    def test_create_user(self):
+        self.users_client.create_user.return_value = {
+            'user': 'a_user'
+        }
+        fake_project = {
+            'id': 'fake_project_id',
+        }
+        res = self.creds_client.create_user('fake_username',
+                                            'fake_password',
+                                            fake_project,
+                                            'fake_email')
+        self.assertEqual('a_user', res)
+        self.users_client.create_user.assert_called_once_with(
+            name='fake_username', password='fake_password',
+            project_id=fake_project['id'],
+            email='fake_email',
+            domain_id='fake_domain_id')
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index 4723458..b4b1b91 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from unittest import mock
+
 import fixtures
-import mock
 from oslo_config import cfg
 
 from tempest.common import credentials_factory as credentials
@@ -110,7 +111,7 @@
                           (200,
                            {'roles': [{'id': id, 'name': name},
                                       {'id': '1', 'name': 'FakeRole'},
-                                      {'id': '2', 'name': 'Member'}]}))))
+                                      {'id': '2', 'name': 'member'}]}))))
         return roles_fix
 
     def _mock_list_2_roles(self):
@@ -139,7 +140,7 @@
             return_value=(rest_client.ResponseBody
                           (200, {'roles': [
                               {'id': '1', 'name': 'FakeRole'},
-                              {'id': '2', 'name': 'Member'}]}))))
+                              {'id': '2', 'name': 'member'}]}))))
         return roles_fix
 
     def _mock_list_ec2_credentials(self, user_id, tenant_id):
@@ -213,6 +214,56 @@
         self.assertEqual(admin_creds.user_id, '1234')
 
     @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_project_alt_admin_creds(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_roles('1234', 'admin')
+        self._mock_user_create('1234', 'fake_alt_admin_user')
+        self._mock_tenant_create('1234', 'fake_alt_admin')
+
+        user_mock = mock.patch.object(self.roles_client.RolesClient,
+                                      'create_user_role_on_project')
+        user_mock.start()
+        self.addCleanup(user_mock.stop)
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock:
+            alt_admin_creds = creds.get_project_alt_admin_creds()
+        user_mock.assert_has_calls([
+            mock.call('1234', '1234', '1234')])
+        self.assertEqual(alt_admin_creds.username, 'fake_alt_admin_user')
+        self.assertEqual(alt_admin_creds.project_name, 'fake_alt_admin')
+        # Verify IDs
+        self.assertEqual(alt_admin_creds.project_id, '1234')
+        self.assertEqual(alt_admin_creds.user_id, '1234')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_project_alt_member_creds(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_assign_user_role()
+        self._mock_list_role()
+        self._mock_tenant_create('1234', 'fake_alt_member')
+        self._mock_user_create('1234', 'fake_alt_user')
+        alt_member_creds = creds.get_project_alt_member_creds()
+        self.assertEqual(alt_member_creds.username, 'fake_alt_user')
+        self.assertEqual(alt_member_creds.project_name, 'fake_alt_member')
+        # Verify IDs
+        self.assertEqual(alt_member_creds.project_id, '1234')
+        self.assertEqual(alt_member_creds.user_id, '1234')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_project_alt_reader_creds(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_assign_user_role()
+        self._mock_list_roles('1234', 'reader')
+        self._mock_tenant_create('1234', 'fake_alt_reader')
+        self._mock_user_create('1234', 'fake_alt_user')
+        alt_reader_creds = creds.get_project_alt_reader_creds()
+        self.assertEqual(alt_reader_creds.username, 'fake_alt_user')
+        self.assertEqual(alt_reader_creds.project_name, 'fake_alt_reader')
+        # Verify IDs
+        self.assertEqual(alt_reader_creds.project_id, '1234')
+        self.assertEqual(alt_reader_creds.user_id, '1234')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
     def test_role_creds(self, MockRestClient):
         creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
         self._mock_list_2_roles()
@@ -241,6 +292,100 @@
         self.assertEqual(role_creds.user_id, '1234')
 
     @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_role_creds_with_project_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        self._mock_tenant_create('1234', 'fake_role_project')
+
+        user_mock = mock.patch.object(self.roles_client.RolesClient,
+                                      'create_user_role_on_project')
+        user_mock.start()
+        self.addCleanup(user_mock.stop)
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='project')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        args = map(lambda x: x[1], calls)
+        args = list(args)
+        self.assertIn(('1234', '1234', '1234'), args)
+        self.assertIn(('1234', '1234', '12345'), args)
+        self.assertEqual(role_creds.username, 'fake_role_user')
+        self.assertEqual(role_creds.project_name, 'fake_role_project')
+        # Verify IDs
+        self.assertEqual(role_creds.project_id, '1234')
+        self.assertEqual(role_creds.user_id, '1234')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def _test_get_same_role_creds_with_project_scope(self, MockRestClient,
+                                                     scope=None):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        self._mock_tenant_create('1234', 'fake_role_project')
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope=scope)
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+
+        # Fetch the same creds again
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope=scope)
+        calls = user_mock1.mock_calls
+        # Assert that the previously created creds are returned and no
+        # call to role creation is made.
+        self.assertEqual(len(calls), 0)
+        # Check if previously created creds are returned.
+        self.assertEqual(role_creds, role_creds_new)
+
+    def test_get_same_role_creds_with_project_scope(self):
+        self._test_get_same_role_creds_with_project_scope(scope='project')
+
+    def test_get_same_role_creds_with_default_scope(self):
+        self._test_get_same_role_creds_with_project_scope()
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def _test_get_different_role_creds_with_project_scope(
+            self, MockRestClient, scope=None):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        self._mock_tenant_create('1234', 'fake_role_project')
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope=scope)
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        # Fetch the creds with one role different
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1'], scope=scope)
+        calls = user_mock1.mock_calls
+        # Because one role is different, assert that the role creation
+        # is called with the one specified role
+        self.assertEqual(len(calls), 1)
+        # Check new creds is created for new roles.
+        self.assertNotEqual(role_creds, role_creds_new)
+
+    def test_get_different_role_creds_with_project_scope(self):
+        self._test_get_different_role_creds_with_project_scope(
+            scope='project')
+
+    def test_get_different_role_creds_with_default_scope(self):
+        self._test_get_different_role_creds_with_project_scope()
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
     def test_all_cred_cleanup(self, MockRestClient):
         creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
         self._mock_assign_user_role()
@@ -657,6 +802,232 @@
         return project_fix
 
     @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_role_creds_with_system_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        args = map(lambda x: x[1], calls)
+        args = list(args)
+        self.assertIn(('1234', '1234'), args)
+        self.assertIn(('1234', '12345'), args)
+        self.assertEqual(role_creds.username, 'fake_role_user')
+        self.assertEqual(role_creds.user_id, '1234')
+        # Verify system scope
+        self.assertEqual(role_creds.system, 'all')
+        # Verify domain is default
+        self.assertEqual(role_creds.domain_id, 'default')
+        self.assertEqual(role_creds.domain_name, 'Default')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_same_role_creds_with_system_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+
+        # Fetch the same creds again
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock1.mock_calls
+        # Assert that the previously created creds are returned and no
+        # call to role creation is made.
+        self.assertEqual(len(calls), 0)
+        # Verify system scope
+        self.assertEqual(role_creds_new.system, 'all')
+        # Check that the previously created creds are returned.
+        self.assertEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_different_role_creds_with_system_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        # Verify system scope
+        self.assertEqual(role_creds.system, 'all')
+        # Fetch the creds with one role different
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1'], scope='system')
+        calls = user_mock1.mock_calls
+        # Because one role is different, assert that the role creation
+        # is called with the one specified role
+        self.assertEqual(len(calls), 1)
+        # Verify Scope
+        self.assertEqual(role_creds_new.system, 'all')
+        # Check that new creds are created for the new roles.
+        self.assertNotEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_role_creds_with_domain_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        domain = {
+            "id": '12',
+            "enabled": True,
+            "name": "TestDomain"
+        }
+
+        self.useFixture(fixtures.MockPatch(
+            'tempest.lib.common.cred_client.V3CredsClient.create_domain',
+            return_value=domain))
+
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='domain')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        args = map(lambda x: x[1], calls)
+        args = list(args)
+        self.assertIn((domain['id'], '1234', '1234'), args)
+        self.assertIn((domain['id'], '1234', '12345'), args)
+        self.assertEqual(role_creds.username, 'fake_role_user')
+        self.assertEqual(role_creds.user_id, '1234')
+        # Verify creds are under the newly created domain
+        self.assertEqual(role_creds.domain_id, domain['id'])
+        self.assertEqual(role_creds.domain_name, domain['name'])
+        # Verify that Scope is None
+        self.assertIsNone(role_creds.system)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_same_role_creds_with_domain_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        domain = {
+            "id": '12',
+            "enabled": True,
+            "name": "TestDomain"
+        }
+
+        self.useFixture(fixtures.MockPatch(
+            'tempest.lib.common.cred_client.V3CredsClient.create_domain',
+            return_value=domain))
+
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='domain')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        self.assertEqual(role_creds.user_id, '1234')
+        # Verify Scope
+        self.assertIsNone(role_creds.system)
+        # Fetch the same creds again
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='domain')
+        calls = user_mock1.mock_calls
+        # Assert that the previously created creds are returned and no
+        # call to role creation is made.
+        self.assertEqual(len(calls), 0)
+        # Verify Scope
+        self.assertIsNone(role_creds_new.system)
+        # Check that the previously created creds are returned.
+        self.assertEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_different_role_creds_with_domain_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        domain = {
+            "id": '12',
+            "enabled": True,
+            "name": "TestDomain"
+        }
+
+        self.useFixture(fixtures.MockPatch(
+            'tempest.lib.common.cred_client.V3CredsClient.create_domain',
+            return_value=domain))
+
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='domain')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        self.assertEqual(role_creds.user_id, '1234')
+        # Verify Scope
+        self.assertIsNone(role_creds.system)
+        # Fetch the creds with one role different
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1'], scope='domain')
+        calls = user_mock1.mock_calls
+        # Because one role is different, assert that the role creation
+        # is called with the one specified role
+        self.assertEqual(len(calls), 1)
+        # Verify Scope
+        self.assertIsNone(role_creds_new.system)
+        # Check that new creds are created for the new roles.
+        self.assertNotEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_role_creds_with_different_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        self._mock_tenant_create('1234', 'fake_role_project')
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        # Verify Scope
+        self.assertEqual(role_creds.system, 'all')
+
+        # Fetch the same role creds but with different scope
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='project')
+        calls = user_mock1.mock_calls
+        # Because scope is different, assert that the role creation
+        # is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        # Verify Scope
+        self.assertIsNone(role_creds_new.system)
+        # Check that created creds are different
+        self.assertNotEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
     def test_member_role_creation_with_duplicate(self, rest_client_mock):
         creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
         creds.creds_client = mock.MagicMock()
@@ -664,6 +1035,6 @@
         with mock.patch('tempest.lib.common.dynamic_creds.LOG') as log_mock:
             creds._create_creds()
             log_mock.warning.assert_called_once_with(
-                "Member role already exists, ignoring conflict.")
+                "member role already exists, ignoring conflict.")
         creds.creds_client.assign_user_role.assert_called_once_with(
-            mock.ANY, mock.ANY, 'Member')
+            mock.ANY, mock.ANY, 'member')
diff --git a/tempest/tests/lib/common/test_preprov_creds.py b/tempest/tests/lib/common/test_preprov_creds.py
index 25df2a7..f2131dc 100644
--- a/tempest/tests/lib/common/test_preprov_creds.py
+++ b/tempest/tests/lib/common/test_preprov_creds.py
@@ -12,17 +12,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import hashlib
 import os
 import shutil
+from unittest import mock
 
-import mock
-import six
 import testtools
 
 import fixtures
 from oslo_concurrency.fixture import lockutils as lockutils_fixtures
 from oslo_config import cfg
+from oslo_utils.secretutils import md5
 
 from tempest import config
 from tempest.lib import auth
@@ -106,10 +105,10 @@
         hash_fields = (
             preprov_creds.PreProvisionedCredentialProvider.HASH_CRED_FIELDS)
         for account in accounts_list:
-            hash = hashlib.md5()
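+            # usedforsecurity=False marks this as a non-cryptographic use
+            # of MD5, so the helper keeps working on FIPS-enabled hosts.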
+            hash = md5(usedforsecurity=False)
             account_for_hash = dict((k, v) for (k, v) in account.items()
                                     if k in hash_fields)
-            hash.update(six.text_type(account_for_hash).encode('utf-8'))
+            hash.update(str(account_for_hash).encode('utf-8'))
             temp_hash = hash.hexdigest()
             hash_list.append(temp_hash)
         return hash_list
@@ -145,8 +144,7 @@
         # Emulate the lock existing on the filesystem
         self.useFixture(fixtures.MockPatch(
             'os.path.isfile', return_value=True))
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
-                        create=True):
+        with mock.patch('builtins.open', mock.mock_open(), create=True):
             test_account_class = (
                 preprov_creds.PreProvisionedCredentialProvider(
                     **self.fixed_params))
@@ -158,8 +156,7 @@
         # Emulate the lock not existing on the filesystem
         self.useFixture(fixtures.MockPatch(
             'os.path.isfile', return_value=False))
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
-                        create=True):
+        with mock.patch('builtins.open', mock.mock_open(), create=True):
             test_account_class = (
                 preprov_creds.PreProvisionedCredentialProvider(
                     **self.fixed_params))
@@ -178,7 +175,7 @@
             'os.path.isfile', return_value=False))
         test_account_class = preprov_creds.PreProvisionedCredentialProvider(
             **self.fixed_params)
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
+        with mock.patch('builtins.open', mock.mock_open(),
                         create=True) as open_mock:
             test_account_class._get_free_hash(hash_list)
             lock_path = os.path.join(self.fixed_params['accounts_lock_dir'],
@@ -197,8 +194,7 @@
             'os.path.isfile', return_value=True))
         test_account_class = preprov_creds.PreProvisionedCredentialProvider(
             **self.fixed_params)
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
-                        create=True):
+        with mock.patch('builtins.open', mock.mock_open(), create=True):
             self.assertRaises(lib_exc.InvalidCredentials,
                               test_account_class._get_free_hash, hash_list)
 
@@ -218,7 +214,7 @@
             return True
 
         self.patchobject(os.path, 'isfile', _fake_is_file)
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
+        with mock.patch('builtins.open', mock.mock_open(),
                         create=True) as open_mock:
             test_account_class._get_free_hash(hash_list)
             lock_path = os.path.join(self.fixed_params['accounts_lock_dir'],
diff --git a/tempest/tests/lib/common/test_profiler.py b/tempest/tests/lib/common/test_profiler.py
index 59fa0364..166d831 100644
--- a/tempest/tests/lib/common/test_profiler.py
+++ b/tempest/tests/lib/common/test_profiler.py
@@ -10,7 +10,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
+
 import testtools
 
 from tempest.lib.common import profiler
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index b861582..1dea5f5 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -17,7 +17,6 @@
 import fixtures
 import jsonschema
 from oslo_serialization import jsonutils as json
-import six
 
 from tempest.lib.common import http
 from tempest.lib.common import rest_client
@@ -93,7 +92,7 @@
 class TestRestClientHeadersJSON(TestRestClientHTTPMethods):
 
     def _verify_headers(self, resp):
-        resp = dict((k.lower(), v) for k, v in six.iteritems(resp))
+        resp = dict((k.lower(), v) for k, v in resp.items())
         self.assertEqual(self.header_value, resp['accept'])
         self.assertEqual(self.header_value, resp['content-type'])
 
@@ -145,11 +144,11 @@
                                                 extra_headers=True,
                                                 headers=self.headers)
 
-        self.assertDictContainsSubset(
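+        # dict_items views support subset comparison with <=, which lets
+        # assertLessEqual stand in for the deprecated
+        # assertDictContainsSubset helper.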
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_get_update_headers(self):
@@ -157,11 +156,11 @@
                                                extra_headers=True,
                                                headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_delete_update_headers(self):
@@ -169,11 +168,11 @@
                                                   extra_headers=True,
                                                   headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_patch_update_headers(self):
@@ -181,11 +180,11 @@
                                                  extra_headers=True,
                                                  headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_put_update_headers(self):
@@ -193,11 +192,11 @@
                                                extra_headers=True,
                                                headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_head_update_headers(self):
@@ -208,11 +207,11 @@
                                                 extra_headers=True,
                                                 headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
     def test_copy_update_headers(self):
@@ -220,11 +219,11 @@
                                                 extra_headers=True,
                                                 headers=self.headers)
 
-        self.assertDictContainsSubset(
+        self.assertLessEqual(
             {'X-Configuration-Session': 'session_id',
              'Content-Type': 'application/json',
-             'Accept': 'application/json'},
-            return_dict['headers']
+             'Accept': 'application/json'}.items(),
+            return_dict['headers'].items()
         )
 
 
@@ -526,9 +525,11 @@
                           self.rest_client.wait_for_resource_deletion,
                           '1234')
 
-        # time.time() should be called twice, first to start the timer
-        # and then to compute the timedelta
-        self.assertEqual(2, time_mock.call_count)
+        # time.time() should be called 4 times:
+        # 1. to start the timer
+        # 2. to end the timer
+        # 3 & 4. to generate the timeout exception message
+        self.assertEqual(4, time_mock.call_count)
 
     def test_wait_for_deletion_with_unimplemented_deleted_method(self):
         self.rest_client.is_resource_deleted = self.original_deleted_method
diff --git a/tempest/tests/lib/common/test_validation_resources.py b/tempest/tests/lib/common/test_validation_resources.py
index d5139f4..d50fd89 100644
--- a/tempest/tests/lib/common/test_validation_resources.py
+++ b/tempest/tests/lib/common/test_validation_resources.py
@@ -11,8 +11,9 @@
 #    See the License for the specific language governing permissions and
 #    limitations under the License.
 
+from unittest import mock
+
 import fixtures
-import mock
 import testtools
 
 from tempest.lib.common import validation_resources as vr
diff --git a/tempest/tests/lib/common/utils/linux/test_remote_client.py b/tempest/tests/lib/common/utils/linux/test_remote_client.py
index 7a21a5f..df23e63 100644
--- a/tempest/tests/lib/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/lib/common/utils/linux/test_remote_client.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
 
 from tempest.lib.common import ssh
 from tempest.lib.common.utils.linux import remote_client
diff --git a/tempest/tests/lib/common/utils/test_test_utils.py b/tempest/tests/lib/common/utils/test_test_utils.py
index 865767b..d8e3745 100644
--- a/tempest/tests/lib/common/utils/test_test_utils.py
+++ b/tempest/tests/lib/common/utils/test_test_utils.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 import time
+from unittest import mock
 
-import mock
 
 from tempest.lib.common import thread
 from tempest.lib.common.utils import test_utils
@@ -74,6 +74,17 @@
         self.assertRaises(ValueError, test_utils.call_and_ignore_notfound_exc,
                           raise_value_error)
 
+    def test_call_and_ignore_notfound_exc_when_serverfault_raised(self):
+        calls = []
+
+        def raise_serverfault():
+            calls.append('call')
+            raise exceptions.ServerFault()
+        self.assertRaises(exceptions.ServerFault,
+                          test_utils.call_and_ignore_notfound_exc,
+                          raise_serverfault)
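+        # The helper retries on ServerFault before re-raising, so the
+        # wrapped function should have been invoked three times in total.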
+        self.assertEqual(3, len(calls))
+
     def test_call_and_ignore_notfound_exc(self):
         m = mock.Mock(return_value=42)
         args, kwargs = (1,), {'1': None}
diff --git a/tempest/tests/lib/services/compute/test_aggregates_client.py b/tempest/tests/lib/services/compute/test_aggregates_client.py
index 674d92a..1448a4d 100644
--- a/tempest/tests/lib/services/compute/test_aggregates_client.py
+++ b/tempest/tests/lib/services/compute/test_aggregates_client.py
@@ -37,7 +37,7 @@
     FAKE_CREATE_AGGREGATE = {
         "aggregate":
         {
-            "name": u'\xf4',
+            "name": '\xf4',
             "availability_zone": None,
             "deleted": False,
             "created_at": "2015-07-21T04:11:18.000000",
@@ -50,7 +50,7 @@
     FAKE_UPDATE_AGGREGATE = {
         "aggregate":
         {
-            "name": u'\xe9',
+            "name": '\xe9',
             "availability_zone": None,
             "deleted": False,
             "created_at": "2015-07-16T03:07:32.000000",
@@ -74,7 +74,7 @@
         "metadata": {
             "availability_zone": "nova"
         },
-        "name": u'\xe9',
+        "name": '\xe9',
         "updated_at": None
     }
 
diff --git a/tempest/tests/lib/services/compute/test_assisted_volume_snapshots_client.py b/tempest/tests/lib/services/compute/test_assisted_volume_snapshots_client.py
new file mode 100644
index 0000000..79855ea
--- /dev/null
+++ b/tempest/tests/lib/services/compute/test_assisted_volume_snapshots_client.py
@@ -0,0 +1,53 @@
+# Copyright 2017 AT&T.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.compute import assisted_volume_snapshots_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestVolumesClient(base.BaseServiceTest):
+
+    FAKE_SNAPSHOT = {
+        "id": "bf7b810c-70df-4c64-88a7-8588f7a6739c",
+        "volumeId": "59f17c4f-66d4-4271-be40-f200523423a9"
+    }
+
+    def setUp(self):
+        super(TestVolumesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = assisted_volume_snapshots_client.\
+            AssistedVolumeSnapshotsClient(fake_auth, 'compute', 'regionOne')
+
+    def _test_create_assisted_volume_snapshot(self, bytes_body=False):
+        kwargs = {"type": "qcow2", "new_file": "fake_name"}
+        self.check_service_client_function(
+            self.client.create_assisted_volume_snapshot,
+            'tempest.lib.common.rest_client.RestClient.post',
+            {"snapshot": self.FAKE_SNAPSHOT},
+            bytes_body, status=200, volume_id=self.FAKE_SNAPSHOT['volumeId'],
+            snapshot_id=self.FAKE_SNAPSHOT['id'], **kwargs)
+
+    def test_create_assisted_volume_snapshot_with_str_body(self):
+        self._test_create_assisted_volume_snapshot()
+
+    def test_create_assisted_volume_snapshot_with_byte_body(self):
+        self._test_create_assisted_volume_snapshot(bytes_body=True)
+
+    def test_delete_assisted_volume_snapshot(self):
+        self.check_service_client_function(
+            self.client.delete_assisted_volume_snapshot,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {}, status=204, volume_id=self.FAKE_SNAPSHOT['volumeId'],
+            snapshot_id=self.FAKE_SNAPSHOT['id'])
diff --git a/tempest/tests/lib/services/compute/test_availability_zone_client.py b/tempest/tests/lib/services/compute/test_availability_zone_client.py
index 6608592..aef98fc 100644
--- a/tempest/tests/lib/services/compute/test_availability_zone_client.py
+++ b/tempest/tests/lib/services/compute/test_availability_zone_client.py
@@ -27,7 +27,7 @@
                     "available": True
                 },
                 "hosts": None,
-                "zoneName": u'\xf4'
+                "zoneName": '\xf4'
             }
         ]
     }
diff --git a/tempest/tests/lib/services/compute/test_base_compute_client.py b/tempest/tests/lib/services/compute/test_base_compute_client.py
index 69e8542..5841ae4 100644
--- a/tempest/tests/lib/services/compute/test_base_compute_client.py
+++ b/tempest/tests/lib/services/compute/test_base_compute_client.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
 
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions
diff --git a/tempest/tests/lib/services/compute/test_extensions_client.py b/tempest/tests/lib/services/compute/test_extensions_client.py
index d7e217e..053b84c 100644
--- a/tempest/tests/lib/services/compute/test_extensions_client.py
+++ b/tempest/tests/lib/services/compute/test_extensions_client.py
@@ -27,7 +27,7 @@
             "namespace":
             "http://docs.openstack.org/compute/ext/multinic/api/v1.1",
             "alias": "NMN",
-            "description": u'\u2740(*\xb4\u25e1`*)\u2740'
+            "description": '\u2740(*\xb4\u25e1`*)\u2740'
         }
     }
 
diff --git a/tempest/tests/lib/services/compute/test_floating_ip_pools_client.py b/tempest/tests/lib/services/compute/test_floating_ip_pools_client.py
index b0c00f0..6278df4 100644
--- a/tempest/tests/lib/services/compute/test_floating_ip_pools_client.py
+++ b/tempest/tests/lib/services/compute/test_floating_ip_pools_client.py
@@ -22,8 +22,8 @@
     FAKE_FLOATING_IP_POOLS = {
         "floating_ip_pools":
         [
-            {"name": u'\u3042'},
-            {"name": u'\u3044'}
+            {"name": '\u3042'},
+            {"name": '\u3044'}
         ]
     }
 
diff --git a/tempest/tests/lib/services/compute/test_keypairs_client.py b/tempest/tests/lib/services/compute/test_keypairs_client.py
index ed3b9dd..214d0e5 100644
--- a/tempest/tests/lib/services/compute/test_keypairs_client.py
+++ b/tempest/tests/lib/services/compute/test_keypairs_client.py
@@ -23,7 +23,7 @@
 
     FAKE_KEYPAIR = {"keypair": {
         "public_key": "ssh-rsa foo Generated-by-Nova",
-        "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+        "name": '\u2740(*\xb4\u25e1`*)\u2740',
         "user_id": "525d55f98980415ba98e634972fa4a10",
         "fingerprint": "76:24:66:49:d7:ca:6e:5c:77:ea:8e:bb:9c:15:5f:98"
         }}
diff --git a/tempest/tests/lib/services/compute/test_networks_client.py b/tempest/tests/lib/services/compute/test_networks_client.py
index 1908b57..5a88671 100644
--- a/tempest/tests/lib/services/compute/test_networks_client.py
+++ b/tempest/tests/lib/services/compute/test_networks_client.py
@@ -31,7 +31,7 @@
         "deleted_at": None,
         "gateway": None,
         "rxtx_base": None,
-        "label": u'30d7',
+        "label": '30d7',
         "priority": None,
         "project_id": None,
         "vpn_private_address": None,
diff --git a/tempest/tests/lib/services/compute/test_quota_classes_client.py b/tempest/tests/lib/services/compute/test_quota_classes_client.py
index 22d8b91..6921365 100644
--- a/tempest/tests/lib/services/compute/test_quota_classes_client.py
+++ b/tempest/tests/lib/services/compute/test_quota_classes_client.py
@@ -29,7 +29,7 @@
         "ram": 51200,
         "floating_ips": 10,
         "key_pairs": 100,
-        "id": u'\u2740(*\xb4\u25e1`*)\u2740',
+        "id": '\u2740(*\xb4\u25e1`*)\u2740',
         "instances": 10,
         "security_group_rules": 20,
         "security_groups": 10,
diff --git a/tempest/tests/lib/services/compute/test_servers_client.py b/tempest/tests/lib/services/compute/test_servers_client.py
index 86f6ad5..a82b255 100644
--- a/tempest/tests/lib/services/compute/test_servers_client.py
+++ b/tempest/tests/lib/services/compute/test_servers_client.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 import copy
+from unittest import mock
 
-import mock
 
 from tempest.lib.services.compute import base_compute_client
 from tempest.lib.services.compute import servers_client
diff --git a/tempest/tests/lib/services/compute/test_services_client.py b/tempest/tests/lib/services/compute/test_services_client.py
index ba432e3..0c513cc 100644
--- a/tempest/tests/lib/services/compute/test_services_client.py
+++ b/tempest/tests/lib/services/compute/test_services_client.py
@@ -13,8 +13,8 @@
 #    under the License.
 
 import copy
+from unittest import mock
 
-import mock
 
 from tempest.lib.services.compute import base_compute_client
 from tempest.lib.services.compute import services_client
diff --git a/tempest/tests/lib/services/compute/test_tenant_networks_client.py b/tempest/tests/lib/services/compute/test_tenant_networks_client.py
index f71aad9..a042a1a 100644
--- a/tempest/tests/lib/services/compute/test_tenant_networks_client.py
+++ b/tempest/tests/lib/services/compute/test_tenant_networks_client.py
@@ -22,7 +22,7 @@
     FAKE_NETWORK = {
         "cidr": "None",
         "id": "c2329eb4-cc8e-4439-ac4c-932369309e36",
-        "label": u'\u30d7'
+        "label": '\u30d7'
         }
 
     FAKE_NETWORKS = [FAKE_NETWORK]
diff --git a/tempest/tests/lib/services/identity/v2/test_token_client.py b/tempest/tests/lib/services/identity/v2/test_token_client.py
index 5b4e210..dc14a50 100644
--- a/tempest/tests/lib/services/identity/v2/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v2/test_token_client.py
@@ -12,7 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import mock
+from unittest import mock
+
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
diff --git a/tempest/tests/lib/services/identity/v3/test_access_rules_client.py b/tempest/tests/lib/services/identity/v3/test_access_rules_client.py
new file mode 100644
index 0000000..71c9cde
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_access_rules_client.py
@@ -0,0 +1,97 @@
+# Copyright 2019 SUSE LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import access_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestAccessRulesClient(base.BaseServiceTest):
+    FAKE_LIST_ACCESS_RULES = {
+        "links": {
+            "self": "https://example.com/identity/v3/users/" +
+                    "3e0716ae/access_rules",
+            "previous": None,
+            "next": None
+        },
+        "access_rules": [
+            {
+                "path": "/v2.0/metrics",
+                "links": {
+                    "self": "https://example.com/identity/v3/access_rules/" +
+                            "07d719df00f349ef8de77d542edf010c"
+                },
+                "id": "07d719df00f349ef8de77d542edf010c",
+                "service": "monitoring",
+                "method": "GET"
+            }
+        ]
+    }
+
+    FAKE_ACCESS_RULE_INFO = {
+        "access_rule": {
+            "path": "/v2.0/metrics",
+            "links": {
+                "self": "https://example.com/identity/v3/access_rules/" +
+                        "07d719df00f349ef8de77d542edf010c"
+            },
+            "id": "07d719df00f349ef8de77d542edf010c",
+            "service": "monitoring",
+            "method": "GET"
+        }
+    }
+
+    def setUp(self):
+        super(TestAccessRulesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = access_rules_client.AccessRulesClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_show_access_rule(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_access_rule,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ACCESS_RULE_INFO,
+            bytes_body,
+            user_id="123456",
+            access_rule_id="5499a186")
+
+    def _test_list_access_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_access_rules,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ACCESS_RULES,
+            bytes_body,
+            user_id="123456")
+
+    def test_show_access_rule_with_str_body(self):
+        self._test_show_access_rule()
+
+    def test_show_access_rule_with_bytes_body(self):
+        self._test_show_access_rule(bytes_body=True)
+
+    def test_list_access_rule_with_str_body(self):
+        self._test_list_access_rules()
+
+    def test_list_access_rule_with_bytes_body(self):
+        self._test_list_access_rules(bytes_body=True)
+
+    def test_delete_access_rule(self):
+        self.check_service_client_function(
+            self.client.delete_access_rule,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            user_id="123456",
+            access_rule_id="5499a186",
+            status=204)
diff --git a/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py b/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
index 2774c44..8aed7d7 100644
--- a/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_application_credentials_client.py
@@ -20,78 +20,116 @@
 class TestApplicationCredentialsClient(base.BaseServiceTest):
     FAKE_CREATE_APP_CRED = {
         "application_credential": {
-            "description": "fake application credential",
+            "name": "monitoring",
+            "secret": "rEaqvJka48mpv",
+            "description": "Application credential for monitoring.",
+            "expires_at": "2018-02-27T18:30:59Z",
             "roles": [
+                {"name": "Reader"}
+            ],
+            "access_rules": [
                 {
-                    "id": "c60fdd45",
-                    "domain_id": None,
-                    "name": "Member"
+                    "path": "/v2.0/metrics",
+                    "method": "GET",
+                    "service": "monitoring"
                 }
             ],
-            "expires_at": "2019-02-27T18:30:59.999999Z",
-            "secret": "_BVq0xU5L",
-            "unrestricted": None,
-            "project_id": "ddef321",
-            "id": "5499a186",
-            "name": "one"
+            "unrestricted": False
         }
     }
 
     FAKE_LIST_APP_CREDS = {
+        "links": {
+            "self": "http://example.com/identity/v3/users/" +
+                    "fd786d56402c4d1691372e7dee0d00b5/application_credentials",
+            "previous": None,
+            "next": None
+        },
         "application_credentials": [
             {
-                "description": "fake application credential",
+                "description": "Application credential for backups.",
                 "roles": [
                     {
                         "domain_id": None,
-                        "name": "Member",
-                        "id": "c60fdd45",
+                        "name": "Writer",
+                        "id": "6aff702516544aeca22817fd3bc39683"
                     }
                 ],
-                "expires_at": "2018-02-27T18:30:59.999999Z",
-                "unrestricted": None,
-                "project_id": "ddef321",
-                "id": "5499a186",
-                "name": "one"
+                "access_rules": [
+                ],
+                "links": {
+                    "self": "http://example.com/identity/v3/users/" +
+                            "fd786d56402c4d1691372e7dee0d00b5/" +
+                            "application_credentials/" +
+                            "308a7e905eee4071aac5971744c061f6"
+                },
+                "expires_at": "2018-02-27T18:30:59.000000",
+                "unrestricted": False,
+                "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+                "id": "308a7e905eee4071aac5971744c061f6",
+                "name": "backups"
             },
             {
-                "description": None,
+                "description": "Application credential for monitoring.",
                 "roles": [
                     {
-                        "id": "0f1837c8",
+                        "id": "6aff702516544aeca22817fd3bc39683",
                         "domain_id": None,
-                        "name": "anotherrole"
-                    },
-                    {
-                        "id": "c60fdd45",
-                        "domain_id": None,
-                        "name": "Member"
+                        "name": "Reader"
                     }
                 ],
-                "expires_at": None,
-                "unrestricted": None,
-                "project_id": "c5403d938",
-                "id": "d441c904f",
-                "name": "two"
+                "access_rules": [
+                    {
+                        "path": "/v2.0/metrics",
+                        "id": "07d719df00f349ef8de77d542edf010c",
+                        "service": "monitoring",
+                        "method": "GET"
+                    }
+                ],
+                "links": {
+                    "self": "http://example.com/identity/v3/users/" +
+                            "fd786d56402c4d1691372e7dee0d00b5/" +
+                            "application_credentials/" +
+                            "58d61ff8e6e34accb35874016d1dba8b"
+                },
+                "expires_at": "2018-02-27T18:30:59.000000",
+                "unrestricted": False,
+                "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+                "id": "58d61ff8e6e34accb35874016d1dba8b",
+                "name": "monitoring"
             }
         ]
     }
 
     FAKE_APP_CRED_INFO = {
         "application_credential": {
-            "description": None,
+            "description": "Application credential for monitoring.",
             "roles": [
                 {
+                    "id": "6aff702516544aeca22817fd3bc39683",
                     "domain_id": None,
-                    "name": "Member",
-                    "id": "c60fdd45",
+                    "name": "Reader"
                 }
             ],
-            "expires_at": None,
-            "unrestricted": None,
-            "project_id": "ddef321",
-            "id": "5499a186",
-            "name": "one"
+            "access_rules": [
+                {
+                    "path": "/v2.0/metrics",
+                    "id": "07d719df00f349ef8de77d542edf010c",
+                    "service": "monitoring",
+                    "method": "GET"
+                }
+            ],
+            "links": {
+                "self": "http://example.com/identity/v3/users/" +
+                        "fd786d56402c4d1691372e7dee0d00b5/" +
+                        "application_credentials/" +
+                        "58d61ff8e6e34accb35874016d1dba8b"
+            },
+            "expires_at": "2018-02-27T18:30:59.000000",
+            "unrestricted": False,
+            "project_id": "231c62fb0fbd485b995e8b060c3f0d98",
+            "id": "58d61ff8e6e34accb35874016d1dba8b",
+            "name": "monitoring"
         }
     }
 
@@ -118,7 +156,7 @@
             self.FAKE_APP_CRED_INFO,
             bytes_body,
             user_id="123456",
-            application_credential_id="5499a186")
+            application_credential_id="58d61ff8e6e34accb35874016d1dba8b")
 
     def _test_list_app_creds(self, bytes_body=False):
         self.check_service_client_function(
@@ -152,5 +190,5 @@
             'tempest.lib.common.rest_client.RestClient.delete',
             {},
             user_id="123456",
-            application_credential_id="5499a186",
+            application_credential_id="58d61ff8e6e34accb35874016d1dba8b",
             status=204)
diff --git a/tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py b/tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py
index 7faf6a0..e5f7a66 100644
--- a/tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py
@@ -83,6 +83,36 @@
         }
     }
 
+    FAKE_LIST_ENDPOINT_GROUPS_FOR_PROJECT = {
+        "endpoint_groups": [
+            {
+                "endpoint_group": {
+                    "description": "endpoint group description #2",
+                    "filters": {
+                        "interface": "admin"
+                    },
+                    "id": "3de68c",
+                    "name": "endpoint group name #2"
+                }
+            }
+            ],
+        "links": {
+            "self": "https://url/identity/v3/OS-EP-FILTER/endpoint_groups",
+        }
+    }
+
+    FAKE_PROJECT_INFO = {
+        "project": {
+            "domain_id": "1789d1",
+            "id": "263fd9",
+            "links": {
+                "self": "http://example.com/identity/v3/projects/263fd9"
+            },
+            "name": "project name #1",
+            "description": "project description #1"
+        }
+    }
+
     def setUp(self):
         super(TestEndPointsFilterClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -137,6 +167,52 @@
             project_id=3,
             endpoint_id=4)
 
+    def _test_list_endpoint_groups_for_project(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_endpoint_groups_for_project,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ENDPOINT_GROUPS_FOR_PROJECT,
+            bytes_body,
+            status=200,
+            project_id=3)
+
+    def _test_list_projects_for_endpoint_group(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_projects_for_endpoint_group,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_PROJECTS_FOR_ENDPOINTS,
+            bytes_body,
+            status=200,
+            endpoint_group_id=5)
+
+    def _test_list_endpoints_for_endpoint_group(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_endpoints_for_endpoint_group,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ENDPOINTS_FOR_PROJECTS,
+            bytes_body,
+            status=200,
+            endpoint_group_id=5)
+
+    def _test_add_endpoint_group_to_project(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.add_endpoint_group_to_project,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            bytes_body,
+            status=204,
+            endpoint_group_id=5,
+            project_id=6)
+
+    def _test_show_endpoint_group_for_project(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_endpoint_group_for_project,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_PROJECT_INFO,
+            bytes_body,
+            endpoint_group_id=5,
+            project_id=6)
+
     def test_add_endpoint_to_project_with_str_body(self):
         self._test_add_endpoint_to_project()
 
@@ -163,3 +239,43 @@
 
     def test_delete_endpoint_from_project(self):
         self._test_delete_endpoint_from_project()
+
+    def test_list_endpoint_groups_for_project_with_str_body(self):
+        self._test_list_endpoint_groups_for_project()
+
+    def test_list_endpoint_groups_for_project_with_bytes_body(self):
+        self._test_list_endpoint_groups_for_project(bytes_body=True)
+
+    def test_list_projects_for_endpoint_group_with_str_body(self):
+        self._test_list_projects_for_endpoint_group()
+
+    def test_list_projects_for_endpoint_group_with_bytes_body(self):
+        self._test_list_projects_for_endpoint_group(bytes_body=True)
+
+    def test_list_endpoints_for_endpoint_group_with_str_body(self):
+        self._test_list_endpoints_for_endpoint_group()
+
+    def test_list_endpoints_for_endpoint_group_with_bytes_body(self):
+        self._test_list_endpoints_for_endpoint_group(bytes_body=True)
+
+    def test_add_endpoint_group_to_project_with_str_body(self):
+        self._test_add_endpoint_group_to_project()
+
+    def test_add_endpoint_group_to_project_with_bytes_body(self):
+        self._test_add_endpoint_group_to_project(bytes_body=True)
+
+    def test_show_endpoint_group_for_project_with_str_body(self):
+        self._test_show_endpoint_group_for_project()
+
+    def test_show_endpoint_group_for_project_with_bytes_body(self):
+        self._test_show_endpoint_group_for_project(bytes_body=True)
+
+    def test_delete_endpoint_group_from_project(self):
+        self.check_service_client_function(
+            self.client.delete_endpoint_group_from_project,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            False,
+            status=204,
+            endpoint_group_id=5,
+            project_id=5)
diff --git a/tempest/tests/lib/services/identity/v3/test_endpoints_client.py b/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
index ca15dd1..0efc462 100644
--- a/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
@@ -54,12 +54,44 @@
     }
 
     FAKE_SERVICE_ID = "a4dc5060-f757-4662-b658-edd2aefbb41d"
+    FAKE_ENDPOINT_ID = "b335d394-cdb9-4519-b95d-160b7706e54ew"
+
+    FAKE_UPDATE_ENDPOINT = {
+        "endpoint": {
+            "id": "828384",
+            "interface": "internal",
+            "links": {
+                "self": "http://example.com/identity/v3/"
+                        "endpoints/828384"
+            },
+            "region_id": "north",
+            "service_id": "686766",
+            "url": "http://example.com/identity/v3/"
+                   "endpoints/828384"
+        }
+    }
+
+    FAKE_SHOW_ENDPOINT = {
+        "endpoint": {
+            "enabled": True,
+            "id": "01c3d5b92f7841ac83fb4b26173c12c7",
+            "interface": "admin",
+            "links": {
+                "self": "http://example.com/identity/v3/"
+                        "endpoints/828384"
+            },
+            "region": "RegionOne",
+            "region_id": "RegionOne",
+            "service_id": "3b2d6ad7e02c4cde8498a547601f1b8f",
+            "url": "http://23.253.211.234:9696/"
+        }
+    }
 
     def setUp(self):
         super(TestEndpointsClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = endpoints_client.EndPointsClient(fake_auth,
-                                                       'identity', 'regionOne')
+        self.client = endpoints_client.EndPointsClient(
+            fake_auth, 'identity', 'regionOne')
 
     def _test_create_endpoint(self, bytes_body=False):
         self.check_service_client_function(
@@ -84,6 +116,38 @@
             mock_args=[mock_args],
             **params)
 
+    def _test_update_endpoint(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_endpoint,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_UPDATE_ENDPOINT,
+            bytes_body,
+            endpoint_id=self.FAKE_ENDPOINT_ID,
+            interface="public",
+            region_id="north",
+            url="http://example.com/identity/v3/endpoints/828384",
+            service_id=self.FAKE_SERVICE_ID)
+
+    def _test_show_endpoint(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_endpoint,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_ENDPOINT,
+            bytes_body,
+            endpoint_id="3456")
+
+    def test_update_endpoint_with_str_body(self):
+        self._test_update_endpoint()
+
+    def test_update_endpoint_with_bytes_body(self):
+        self._test_update_endpoint(bytes_body=True)
+
+    def test_show_endpoint_with_str_body(self):
+        self._test_show_endpoint()
+
+    def test_show_endpoint_with_bytes_body(self):
+        self._test_show_endpoint(bytes_body=True)
+
     def test_create_endpoint_with_str_body(self):
         self._test_create_endpoint()
 
diff --git a/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py b/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py
new file mode 100644
index 0000000..964c51b
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py
@@ -0,0 +1,142 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import identity_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestIdentityProvidersClient(base.BaseServiceTest):
+    FAKE_IDENTITY_PROVIDERS_INFO = {
+        "identity_providers": [
+            {
+                "domain_id": "FAKE_DOMAIN_ID",
+                "description": "FAKE IDENTITY PROVIDER",
+                "remote_ids": ["fake_id_1", "fake_id_2"],
+                "enabled": True,
+                "id": "FAKE_ID",
+                "links": {
+                    "protocols": "http://example.com/identity/v3/" +
+                                 "OS-FEDERATION/identity_providers/" +
+                                 "FAKE_ID/protocols",
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                            "identity_providers/FAKE_ID"
+                }
+            }
+        ],
+        "links": {
+            "next": None,
+            "previous": None,
+            "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                    "identity_providers"
+        }
+    }
+
+    FAKE_IDENTITY_PROVIDER_INFO = {
+        "identity_provider": {
+            "authorization_ttl": None,
+            "domain_id": "FAKE_DOMAIN_ID",
+            "description": "FAKE IDENTITY PROVIDER",
+            "remote_ids": ["fake_id_1", "fake_id_2"],
+            "enabled": True,
+            "id": "ACME",
+            "links": {
+                "protocols": "http://example.com/identity/v3/OS-FEDERATION/" +
+                             "identity_providers/FAKE_ID/protocols",
+                "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                        "identity_providers/FAKE_ID"
+            }
+        }
+    }
+
+    def setUp(self):
+        super(TestIdentityProvidersClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = identity_providers_client.IdentityProvidersClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_register_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.register_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_IDENTITY_PROVIDER_INFO,
+            bytes_body,
+            identity_provider_id="FAKE_ID",
+            status=201)
+
+    def _test_list_identity_providers(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_identity_providers,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_IDENTITY_PROVIDERS_INFO,
+            bytes_body,
+            status=200)
+
+    def _test_get_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.get_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_IDENTITY_PROVIDER_INFO,
+            bytes_body,
+            identity_provider_id="FAKE_ID",
+            status=200)
+
+    def _test_delete_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.delete_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            bytes_body,
+            identity_provider_id="FAKE_ID",
+            status=204)
+
+    def _test_update_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_IDENTITY_PROVIDER_INFO,
+            bytes_body,
+            identity_provider_id="FAKE_ID",
+            status=200)
+
+    def test_register_identity_provider_with_str_body(self):
+        self._test_register_identity_provider()
+
+    def test_register_identity_provider_with_bytes_body(self):
+        self._test_register_identity_provider(bytes_body=True)
+
+    def test_list_identity_providers_with_str_body(self):
+        self._test_list_identity_providers()
+
+    def test_list_identity_providers_with_bytes_body(self):
+        self._test_list_identity_providers(bytes_body=True)
+
+    def test_get_identity_provider_with_str_body(self):
+        self._test_get_identity_provider()
+
+    def test_get_identity_provider_with_bytes_body(self):
+        self._test_get_identity_provider(bytes_body=True)
+
+    def test_delete_identity_provider_with_str_body(self):
+        self._test_delete_identity_provider()
+
+    def test_delete_identity_provider_with_bytes_body(self):
+        self._test_delete_identity_provider(bytes_body=True)
+
+    def test_update_identity_provider_with_str_body(self):
+        self._test_update_identity_provider()
+
+    def test_update_identity_provider_with_bytes_body(self):
+        self._test_update_identity_provider(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_limit_client.py b/tempest/tests/lib/services/identity/v3/test_limit_client.py
new file mode 100644
index 0000000..07ec6cd
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_limit_client.py
@@ -0,0 +1,82 @@
+# Copyright 2021 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import limits_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLimitsClient(base.BaseServiceTest):
+    def setUp(self):
+        super(TestLimitsClient, self).setUp()
+        self.client = limits_client.LimitsClient(
+            fake_auth_provider.FakeAuthProvider(),
+            'identity', 'regionOne')
+
+    def test_get_registered_limits(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.get_registered_limits,
+            'tempest.lib.common.rest_client.RestClient.get',
+            fake_result,
+            False,
+            status=200)
+
+    def test_create_limit(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.create_limit,
+            'tempest.lib.common.rest_client.RestClient.post',
+            fake_result,
+            False,
+            region_id='regionOne', service_id='image',
+            project_id='project', resource_name='widgets',
+            default_limit=10,
+            description='Spacely Widgets',
+            status=201)
+
+    def test_create_limit_with_domain(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.create_limit,
+            'tempest.lib.common.rest_client.RestClient.post',
+            fake_result,
+            False,
+            region_id='regionOne', service_id='image',
+            project_id='project', resource_name='widgets',
+            default_limit=10,
+            domain_id='foo',
+            description='Spacely Widgets',
+            status=201)
+
+    def test_update_limit(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.update_limit,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            fake_result,
+            False,
+            limit_id='123', resource_limit=20,
+            status=200)
+
+    def test_update_limit_with_description(self):
+        fake_result = {'foo': 'bar'}
+        self.check_service_client_function(
+            self.client.update_limit,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            fake_result,
+            False,
+            limit_id='123', resource_limit=20,
+            description='new description',
+            status=200)
diff --git a/tempest/tests/lib/services/identity/v3/test_mappings_client.py b/tempest/tests/lib/services/identity/v3/test_mappings_client.py
new file mode 100644
index 0000000..845a3f9
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_mappings_client.py
@@ -0,0 +1,183 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import mappings_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestMappingsClient(base.BaseServiceTest):
+    FAKE_MAPPING_INFO = {
+        "mapping": {
+            "id": "fake123",
+            "links": {
+                "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                        "mappings/fake123"
+            },
+            "rules": [
+                {
+                    "local": [
+                        {
+                            "user": {
+                                "name": "{0}"
+                            }
+                        },
+                        {
+                            "group": {
+                                "id": "0cd5e9"
+                            }
+                        }
+                    ],
+                    "remote": [
+                        {
+                            "type": "UserName"
+                        },
+                        {
+                            "type": "orgPersonType",
+                            "not_any_of": [
+                                "Contractor",
+                                "Guest"
+                            ]
+                        }
+                    ]
+                }
+            ]
+        }
+    }
+
+    FAKE_MAPPINGS_INFO = {
+        "links": {
+            "next": None,
+            "previous": None,
+            "self": "http://example.com/identity/v3/OS-FEDERATION/mappings"
+        },
+        "mappings": [
+            {
+                "id": "fake123",
+                "links": {
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                            "mappings/fake123"
+                },
+                "rules": [
+                    {
+                        "local": [
+                            {
+                                "user": {
+                                    "name": "{0}"
+                                }
+                            },
+                            {
+                                "group": {
+                                    "id": "0cd5e9"
+                                }
+                            }
+                        ],
+                        "remote": [
+                            {
+                                "type": "UserName"
+                            },
+                            {
+                                "type": "orgPersonType",
+                                "any_one_of": [
+                                    "Contractor",
+                                    "SubContractor"
+                                ]
+                            }
+                        ]
+                    }
+                ]
+            }
+        ]
+    }
+
+    def setUp(self):
+        super(TestMappingsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = mappings_client.MappingsClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_create_mapping(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_mapping,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_MAPPING_INFO,
+            bytes_body,
+            mapping_id="fake123",
+            status=201)
+
+    def _test_get_mapping(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.get_mapping,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_MAPPING_INFO,
+            bytes_body,
+            mapping_id="fake123",
+            status=200)
+
+    def _test_update_mapping(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_mapping,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_MAPPING_INFO,
+            bytes_body,
+            mapping_id="fake123",
+            status=200)
+
+    def _test_list_mappings(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_mappings,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_MAPPINGS_INFO,
+            bytes_body,
+            status=200)
+
+    def _test_delete_mapping(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.delete_mapping,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            bytes_body,
+            mapping_id="fake123",
+            status=204)
+
+    def test_create_mapping_with_str_body(self):
+        self._test_create_mapping()
+
+    def test_create_mapping_with_bytes_body(self):
+        self._test_create_mapping(bytes_body=True)
+
+    def test_get_mapping_with_str_body(self):
+        self._test_get_mapping()
+
+    def test_get_mapping_with_bytes_body(self):
+        self._test_get_mapping(bytes_body=True)
+
+    def test_update_mapping_with_str_body(self):
+        self._test_update_mapping()
+
+    def test_update_mapping_with_bytes_body(self):
+        self._test_update_mapping(bytes_body=True)
+
+    def test_list_mappings_with_str_body(self):
+        self._test_list_mappings()
+
+    def test_list_mappings_with_bytes_body(self):
+        self._test_list_mappings(bytes_body=True)
+
+    def test_delete_mapping_with_str_body(self):
+        self._test_delete_mapping()
+
+    def test_delete_mapping_with_bytes_body(self):
+        self._test_delete_mapping(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_policies_client.py b/tempest/tests/lib/services/identity/v3/test_policies_client.py
index 0237475..4fc800a 100644
--- a/tempest/tests/lib/services/identity/v3/test_policies_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_policies_client.py
@@ -44,6 +44,34 @@
             }
         }
 
+    FAKE_ENDPOINT_INFO = {
+        "endpoints": [
+            {
+                "id": "1",
+                "interface": "public",
+                "links": {
+                    "self": "http://example.com/identity/v3/endpoints/1"
+                },
+                "region": "north",
+                "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596",
+                "url": "http://example.com/identity/"
+            },
+            {
+                "id": "1",
+                "interface": "internal",
+                "links": {
+                    "self": "http://example.com/identity/v3/endpoints/1"
+                },
+                "region": "south",
+                "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596",
+                "url": "http://example.com/identity/"
+            }
+        ],
+        "links": {
+            "self": "http://exmp.com/identity/v3/OS-ENDPOINT-POLICY/policies/1"
+        }
+    }
+
     FAKE_LIST_POLICIES = {
         "links": {
             "next": None,
@@ -238,3 +266,33 @@
             service_id=self.FAKE_SERVICE_ID,
             region_id=self.FAKE_REGION_ID,
             status=204)
+
+    def _test_list_endpoints_for_policy(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_endpoints_for_policy,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ENDPOINT_INFO,
+            bytes_body,
+            policy_id=self.FAKE_POLICY_ID,
+            status=200)
+
+    def test_list_endpoints_for_policy_with_str_body(self):
+        self._test_list_endpoints_for_policy()
+
+    def test_list_endpoints_for_policy_with_bytes_body(self):
+        self._test_list_endpoints_for_policy(bytes_body=True)
+
+    def _test_list_policy_for_endpoint(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_policy_for_endpoint,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_POLICY_INFO,
+            bytes_body,
+            endpoint_id=self.FAKE_ENDPOINT_ID,
+            status=200)
+
+    def test_list_policy_for_endpoint_with_str_body(self):
+        self._test_list_policy_for_endpoint()
+
+    def test_list_policy_for_endpoint_with_bytes_body(self):
+        self._test_list_policy_for_endpoint(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_protocols_client.py b/tempest/tests/lib/services/identity/v3/test_protocols_client.py
new file mode 100644
index 0000000..c1d04f4
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_protocols_client.py
@@ -0,0 +1,140 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import protocols_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestProtocolsClient(base.BaseServiceTest):
+    FAKE_PROTOCOLS_INFO = {
+        "links": {
+            "next": None,
+            "previous": None,
+            "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                    "identity_providers/FAKE_ID/protocols"
+        },
+        "protocols": [
+            {
+                "id": "fake_id1",
+                "links": {
+                    "identity_provider": "http://example.com/identity/v3/" +
+                                         "OS-FEDERATION/identity_providers/" +
+                                         "FAKE_ID",
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/"
+                            "identity_providers/FAKE_ID/protocols/fake_id1"
+                },
+                "mapping_id": "fake123"
+            }
+        ]
+    }
+
+    FAKE_PROTOCOL_INFO = {
+        "protocol": {
+            "id": "fake_id1",
+            "links": {
+                "identity_provider": "http://example.com/identity/v3/OS-" +
+                                     "FEDERATION/identity_providers/FAKE_ID",
+                "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                        "identity_providers/FAKE_ID/protocols/fake_id1"
+            },
+            "mapping_id": "fake123"
+        }
+    }
+
+    def setUp(self):
+        super(TestProtocolsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = protocols_client.ProtocolsClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_add_protocol_to_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.add_protocol_to_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_PROTOCOL_INFO,
+            bytes_body,
+            idp_id="FAKE_ID",
+            protocol_id="fake_id1",
+            status=201)
+
+    def _test_list_protocols_of_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_protocols_of_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_PROTOCOLS_INFO,
+            bytes_body,
+            idp_id="FAKE_ID",
+            status=200)
+
+    def _test_get_protocol_for_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.get_protocol_for_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_PROTOCOL_INFO,
+            bytes_body,
+            idp_id="FAKE_ID",
+            protocol_id="fake_id1",
+            status=200)
+
+    def _test_update_mapping_for_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_mapping_for_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_PROTOCOL_INFO,
+            bytes_body,
+            idp_id="FAKE_ID",
+            protocol_id="fake_id1",
+            status=200)
+
+    def _test_delete_protocol_from_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.delete_protocol_from_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            bytes_body,
+            idp_id="FAKE_ID",
+            protocol_id="fake_id1",
+            status=204)
+
+    def test_add_protocol_to_identity_provider_with_str_body(self):
+        self._test_add_protocol_to_identity_provider()
+
+    def test_add_protocol_to_identity_provider_with_bytes_body(self):
+        self._test_add_protocol_to_identity_provider(bytes_body=True)
+
+    def test_list_protocols_of_identity_provider_with_str_body(self):
+        self._test_list_protocols_of_identity_provider()
+
+    def test_list_protocols_of_identity_provider_with_bytes_body(self):
+        self._test_list_protocols_of_identity_provider(bytes_body=True)
+
+    def test_get_protocol_for_identity_provider_with_str_body(self):
+        self._test_get_protocol_for_identity_provider()
+
+    def test_get_protocol_for_identity_provider_with_bytes_body(self):
+        self._test_get_protocol_for_identity_provider(bytes_body=True)
+
+    def test_update_mapping_for_identity_provider_with_str_body(self):
+        self._test_update_mapping_for_identity_provider()
+
+    def test_update_mapping_for_identity_provider_with_bytes_body(self):
+        self._test_update_mapping_for_identity_provider(bytes_body=True)
+
+    def test_delete_protocol_from_identity_provider_with_str_body(self):
+        self._test_delete_protocol_from_identity_provider()
+
+    def test_delete_protocol_from_identity_provider_with_bytes_body(self):
+        self._test_delete_protocol_from_identity_provider(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_roles_client.py b/tempest/tests/lib/services/identity/v3/test_roles_client.py
index 8d6bb42..e963310 100644
--- a/tempest/tests/lib/services/identity/v3/test_roles_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_roles_client.py
@@ -225,6 +225,16 @@
             role_id="1234",
             status=204)
 
+    def _test_create_user_role_on_system(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_user_role_on_system,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            bytes_body,
+            user_id="123",
+            role_id="1234",
+            status=204)
+
     def _test_list_user_roles_on_project(self, bytes_body=False):
         self.check_service_client_function(
             self.client.list_user_roles_on_project,
@@ -243,6 +253,14 @@
             domain_id="b344506af7644f6794d9cb316600b020",
             user_id="123")
 
+    def _test_list_user_roles_on_system(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_user_roles_on_system,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ROLES,
+            bytes_body,
+            user_id="123")
+
     def _test_create_group_role_on_project(self, bytes_body=False):
         self.check_service_client_function(
             self.client.create_group_role_on_project,
@@ -265,6 +283,16 @@
             role_id="1234",
             status=204)
 
+    def _test_create_group_role_on_system(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_group_role_on_system,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            bytes_body,
+            group_id="123",
+            role_id="1234",
+            status=204)
+
     def _test_list_group_roles_on_project(self, bytes_body=False):
         self.check_service_client_function(
             self.client.list_group_roles_on_project,
@@ -283,6 +311,15 @@
             domain_id="b344506af7644f6794d9cb316600b020",
             group_id="123")
 
+    def _test_list_group_roles_on_system(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_group_roles_on_system,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ROLES,
+            bytes_body,
+            domain_id="b344506af7644f6794d9cb316600b020",
+            group_id="123")
+
     def _test_create_role_inference_rule(self, bytes_body=False):
         self.check_service_client_function(
             self.client.create_role_inference_rule,
@@ -405,6 +442,15 @@
             role_id="1234",
             status=204)
 
+    def test_delete_role_from_user_on_system(self):
+        self.check_service_client_function(
+            self.client.delete_role_from_user_on_system,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            user_id="123",
+            role_id="1234",
+            status=204)
+
     def test_delete_role_from_group_on_project(self):
         self.check_service_client_function(
             self.client.delete_role_from_group_on_project,
@@ -425,6 +471,15 @@
             role_id="1234",
             status=204)
 
+    def test_delete_role_from_group_on_system(self):
+        self.check_service_client_function(
+            self.client.delete_role_from_group_on_system,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            group_id="123",
+            role_id="1234",
+            status=204)
+
     def test_check_user_role_existence_on_project(self):
         self.check_service_client_function(
             self.client.check_user_role_existence_on_project,
@@ -445,6 +500,15 @@
             role_id="1234",
             status=204)
 
+    def test_check_user_role_existence_on_system(self):
+        self.check_service_client_function(
+            self.client.check_user_role_existence_on_system,
+            'tempest.lib.common.rest_client.RestClient.head',
+            {},
+            user_id="123",
+            role_id="1234",
+            status=204)
+
     def test_check_role_from_group_on_project_existence(self):
         self.check_service_client_function(
             self.client.check_role_from_group_on_project_existence,
@@ -465,6 +529,15 @@
             role_id="1234",
             status=204)
 
+    def test_check_role_from_group_on_system_existence(self):
+        self.check_service_client_function(
+            self.client.check_role_from_group_on_system_existence,
+            'tempest.lib.common.rest_client.RestClient.head',
+            {},
+            group_id="123",
+            role_id="1234",
+            status=204)
+
     def test_create_role_inference_rule_with_str_body(self):
         self._test_create_role_inference_rule()
 
diff --git a/tempest/tests/lib/services/identity/v3/test_service_providers_client.py b/tempest/tests/lib/services/identity/v3/test_service_providers_client.py
new file mode 100644
index 0000000..ec908bc
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_service_providers_client.py
@@ -0,0 +1,157 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import service_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestServiceProvidersClient(base.BaseServiceTest):
+    FAKE_SERVICE_PROVIDER_INFO = {
+        "service_provider": {
+            "auth_url": "https://example.com/identity/v3/OS-FEDERATION/" +
+                        "identity_providers/FAKE_ID/protocols/fake_id1/auth",
+            "description": "Fake Service Provider",
+            "enabled": True,
+            "id": "FAKE_ID",
+            "links": {
+                "self": "https://example.com/identity/v3/OS-FEDERATION/" +
+                        "service_providers/FAKE_ID"
+            },
+            "relay_state_prefix": "ss:mem:",
+            "sp_url": "https://example.com/identity/Shibboleth.sso/" +
+                      "FAKE_ID1/ECP"
+        }
+    }
+
+    FAKE_SERVICE_PROVIDERS_INFO = {
+        "links": {
+            "next": None,
+            "previous": None,
+            "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                    "service_providers"
+        },
+        "service_providers": [
+            {
+                "auth_url": "https://example.com/identity/v3/OS-FEDERATION/" +
+                            "identity_providers/acme/protocols/saml2/auth",
+                "description": "Stores ACME identities",
+                "enabled": True,
+                "id": "ACME",
+                "links": {
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                            "service_providers/ACME"
+                },
+                "relay_state_prefix": "ss:mem:",
+                "sp_url": "https://example.com/identity/Shibboleth.sso/" +
+                          "SAML2/ECP"
+            },
+            {
+                "auth_url": "https://other.example.com/identity/v3/" +
+                            "OS-FEDERATION/identity_providers/acme/" +
+                            "protocols/saml2/auth",
+                "description": "Stores contractor identities",
+                "enabled": False,
+                "id": "ACME-contractors",
+                "links": {
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                            "service_providers/ACME-contractors"
+                },
+                "relay_state_prefix": "ss:mem:",
+                "sp_url": "https://other.example.com/identity/Shibboleth" +
+                          ".sso/SAML2/ECP"
+            }
+        ]
+    }
+
+    def setUp(self):
+        super(TestServiceProvidersClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = service_providers_client.ServiceProvidersClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_register_service_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.register_service_provider,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_SERVICE_PROVIDER_INFO,
+            bytes_body,
+            service_provider_id="FAKE_ID",
+            status=201)
+
+    def _test_list_service_providers(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_service_providers,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SERVICE_PROVIDERS_INFO,
+            bytes_body,
+            status=200)
+
+    def _test_get_service_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.get_service_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SERVICE_PROVIDER_INFO,
+            bytes_body,
+            service_provider_id="FAKE_ID",
+            status=200)
+
+    def _test_delete_service_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.delete_service_provider,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            bytes_body,
+            service_provider_id="FAKE_ID",
+            status=204)
+
+    def _test_update_service_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_service_provider,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_SERVICE_PROVIDER_INFO,
+            bytes_body,
+            service_provider_id="FAKE_ID",
+            status=200)
+
+    def test_register_service_provider_with_str_body(self):
+        self._test_register_service_provider()
+
+    def test_register_service_provider_with_bytes_body(self):
+        self._test_register_service_provider(bytes_body=True)
+
+    def test_list_service_providers_with_str_body(self):
+        self._test_list_service_providers()
+
+    def test_list_service_providers_with_bytes_body(self):
+        self._test_list_service_providers(bytes_body=True)
+
+    def test_get_service_provider_with_str_body(self):
+        self._test_get_service_provider()
+
+    def test_get_service_provider_with_bytes_body(self):
+        self._test_get_service_provider(bytes_body=True)
+
+    def test_delete_service_provider_with_str_body(self):
+        self._test_delete_service_provider()
+
+    def test_delete_service_provider_with_bytes_body(self):
+        self._test_delete_service_provider(bytes_body=True)
+
+    def test_update_service_provider_with_str_body(self):
+        self._test_update_service_provider()
+
+    def test_update_service_provider_with_bytes_body(self):
+        self._test_update_service_provider(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_token_client.py b/tempest/tests/lib/services/identity/v3/test_token_client.py
index 656e10a..1c2295d 100644
--- a/tempest/tests/lib/services/identity/v3/test_token_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_token_client.py
@@ -12,7 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import mock
+from unittest import mock
+
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
diff --git a/tempest/tests/lib/services/identity/v3/test_trusts_client.py b/tempest/tests/lib/services/identity/v3/test_trusts_client.py
index a1ca020..33dca7d 100644
--- a/tempest/tests/lib/services/identity/v3/test_trusts_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_trusts_client.py
@@ -94,6 +94,35 @@
             }
         }
 
+    FAKE_LIST_TRUSTS_ROLES = {
+        "roles": [
+            {
+                "id": "c1648e",
+                "links": {
+                    "self": "http://example.com/identity/v3/roles/c1648e"
+                },
+                "name": "manager"
+            },
+            {
+                "id": "ed7b78",
+                "links": {
+                    "self": "http://example.com/identity/v3/roles/ed7b78"
+                },
+                "name": "member"
+            }
+        ]
+    }
+
+    FAKE_TRUST_ROLE = {
+        "role": {
+            "id": "c1648e",
+            "links": {
+                "self": "http://example.com/identity/v3/roles/c1648e"
+            },
+            "name": "manager"
+        }
+    }
+
     def setUp(self):
         super(TestTrustsClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -123,6 +152,43 @@
             self.FAKE_LIST_TRUSTS,
             bytes_body)
 
+    def _test_list_trust_roles(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_trust_roles,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_TRUSTS_ROLES,
+            bytes_body,
+            trust_id="1ff900")
+
+    def test_check_trust_role(self):
+        self.check_service_client_function(
+            self.client.check_trust_role,
+            'tempest.lib.common.rest_client.RestClient.head',
+            {},
+            trust_id="1ff900",
+            role_id="ed7b78")
+
+    def _check_show_trust_role(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_trust_role,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_TRUST_ROLE,
+            bytes_body,
+            trust_id="1ff900",
+            role_id="ed7b78")
+
+    def test_list_trust_roles_with_str_body(self):
+        self._test_list_trust_roles()
+
+    def test_list_trust_roles_with_bytes_body(self):
+        self._test_list_trust_roles(bytes_body=True)
+
+    def test_check_show_trust_role_with_str_body(self):
+        self._check_show_trust_role()
+
+    def test_check_show_trust_role_with_bytes_body(self):
+        self._check_show_trust_role(bytes_body=True)
+
     def test_create_trust_with_str_body(self):
         self._test_create_trust()
 
diff --git a/tempest/tests/lib/services/identity/v3/test_users_client.py b/tempest/tests/lib/services/identity/v3/test_users_client.py
index c0dfdae..7be0480 100644
--- a/tempest/tests/lib/services/identity/v3/test_users_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_users_client.py
@@ -141,6 +141,35 @@
         ]
     }
 
+    FAKE_USER_EC2_CREDENTIAL_INFO = {
+        "credential": {
+            'user_id': '9beb0e12f3e5416db8d7cccfc785db3b',
+            'access': '79abf59acc77492a86170cbe2f1feafa',
+            'secret': 'c4e7d3a691fd4563873d381a40320f46',
+            'trust_id': None,
+            'tenant_id': '596557269d7b4dd78631a602eb9f151d'
+        }
+    }
+
+    FAKE_LIST_USER_EC2_CREDENTIALS = {
+        "credentials": [
+            {
+                'user_id': '9beb0e12f3e5416db8d7cccfc785db3b',
+                'access': '79abf59acc77492a86170cbe2f1feafa',
+                'secret': 'c4e7d3a691fd4563873d381a40320f46',
+                'trust_id': None,
+                'tenant_id': '596557269d7b4dd78631a602eb9f151d'
+            },
+            {
+                'user_id': '3beb0e12f3e5416db8d7cccfc785de4r',
+                'access': '45abf59acc77492a86170cbe2f1fesde',
+                'secret': 'g4e7d3a691fd4563873d381a40320e45',
+                'trust_id': None,
+                'tenant_id': '123557269d7b4dd78631a602eb9f112f'
+            }
+        ]
+    }
+
     def setUp(self):
         super(TestUsersClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -201,6 +230,33 @@
             user_id='817fb3c23fd7465ba6d7fe1b1320121d',
         )
 
+    def _test_create_user_ec2_credential(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_user_ec2_credential,
+            'tempest.lib.common.rest_client.RestClient.post',
+            self.FAKE_USER_EC2_CREDENTIAL_INFO,
+            bytes_body,
+            status=201,
+            user_id="1",
+            tenant_id="123")
+
+    def _test_show_user_ec2_credential(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_user_ec2_credential,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_USER_EC2_CREDENTIAL_INFO,
+            bytes_body,
+            user_id="1",
+            access="123")
+
+    def _test_list_user_ec2_credentials(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_user_ec2_credentials,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_USER_EC2_CREDENTIALS,
+            bytes_body,
+            user_id="1")
+
     def test_create_user_with_string_body(self):
         self._test_create_user()
 
@@ -255,3 +311,30 @@
             user_id='817fb3c23fd7465ba6d7fe1b1320121d',
             password='NewTempestPassword',
             original_password='OldTempestPassword')
+
+    def test_create_user_ec2_credential_with_str_body(self):
+        self._test_create_user_ec2_credential()
+
+    def test_create_user_ec2_credential_with_bytes_body(self):
+        self._test_create_user_ec2_credential(bytes_body=True)
+
+    def test_show_user_ec2_credential_with_str_body(self):
+        self._test_show_user_ec2_credential()
+
+    def test_show_user_ec2_credential_with_bytes_body(self):
+        self._test_show_user_ec2_credential(bytes_body=True)
+
+    def test_list_user_ec2_credentials_with_str_body(self):
+        self._test_list_user_ec2_credentials()
+
+    def test_list_user_ec2_credentials_with_bytes_body(self):
+        self._test_list_user_ec2_credentials(bytes_body=True)
+
+    def test_delete_user_ec2_credential(self):
+        self.check_service_client_function(
+            self.client.delete_user_ec2_credential,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            user_id="123",
+            access="1234",
+            status=204)
diff --git a/tempest/tests/lib/services/image/v2/test_images_client.py b/tempest/tests/lib/services/image/v2/test_images_client.py
index ee4d4cb..5b162f8 100644
--- a/tempest/tests/lib/services/image/v2/test_images_client.py
+++ b/tempest/tests/lib/services/image/v2/test_images_client.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.lib.common.utils import data_utils
 from tempest.lib.services.image.v2 import images_client
@@ -35,14 +35,19 @@
         "created_at": "2012-08-10T19:23:50Z",
         "updated_at": "2012-08-12T11:11:33Z",
         "self": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea",
-        "file": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/file",
+        "file": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927"
+                "dea/file",
         "schema": "/v2/schemas/image",
         "owner": None,
         "min_ram": None,
         "min_disk": None,
         "disk_format": None,
         "virtual_size": None,
-        "container_format": None
+        "container_format": None,
+        "os_hash_algo": "sha512",
+        "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
+        "os_hidden": False,
+        "protected": False,
     }
 
     FAKE_LIST_IMAGES = {
@@ -66,7 +71,10 @@
                 "size": 13167616,
                 "min_ram": 0,
                 "schema": "/v2/schemas/image",
-                "virtual_size": None
+                "virtual_size": None,
+                "os_hash_algo": "sha512",
+                "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
+                "os_hidden": False
             },
             {
                 "status": "active",
@@ -87,13 +95,54 @@
                 "size": 476704768,
                 "min_ram": 0,
                 "schema": "/v2/schemas/image",
-                "virtual_size": None
+                "virtual_size": None,
+                "os_hash_algo": "sha512",
+                "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
+                "os_hidden": False
             }
         ],
         "schema": "/v2/schemas/images",
         "first": "/v2/images"
     }
 
+    FAKE_SHOW_IMAGE_TASKS = {
+        "tasks": [
+            {
+                "id": "ee22890e-8948-4ea6-9668-831f973c84f5",
+                "image_id": "dddddddd-dddd-dddd-dddd-dddddddddddd",
+                "request-id": "rrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr",
+                "user": "uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu",
+                "type": "api_image_import",
+                "status": "processing",
+                "owner": "64f0efc9955145aeb06f297a8a6fe402",
+                "expires_at": None,
+                "created_at": "2020-12-18T05:20:38.000000",
+                "updated_at": "2020-12-18T05:25:39.000000",
+                "deleted_at": None,
+                "deleted": False,
+                "input": {
+                    "image_id": "829c729b-ebc4-4cc7-a164-6f43f1149b17",
+                    "import_req": {
+                        "method": {
+                            "name": "copy-image",
+                        },
+                        "all_stores": True,
+                        "all_stores_must_succeed": False,
+                    },
+                    "backend": [
+                        "fast",
+                        "cheap",
+                        "slow",
+                        "reliable",
+                        "common",
+                    ]
+                },
+                "result": None,
+                "message": "Copied 15 MiB",
+            }
+        ]
+    }
+
     FAKE_TAG_NAME = "fake tag"
 
     def setUp(self):
@@ -167,7 +216,7 @@
             {}, image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8", status=204)
 
     def test_store_image_file(self):
-        data = six.BytesIO(data_utils.random_bytes())
+        data = io.BytesIO(data_utils.random_bytes())
 
         self.check_service_client_function(
             self.client.store_image_file,
@@ -219,3 +268,11 @@
 
     def test_list_images_with_bytes_body(self):
         self._test_list_images(bytes_body=True)
+
+    def test_show_image_tasks(self):
+        self.check_service_client_function(
+            self.client.show_image_tasks,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_IMAGE_TASKS,
+            True,
+            image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8")
diff --git a/tempest/tests/lib/services/image/v2/test_namespaces_client.py b/tempest/tests/lib/services/image/v2/test_namespaces_client.py
index 3b057ad..db1ffae 100644
--- a/tempest/tests/lib/services/image/v2/test_namespaces_client.py
+++ b/tempest/tests/lib/services/image/v2/test_namespaces_client.py
@@ -18,16 +18,64 @@
 
 
 class TestNamespacesClient(base.BaseServiceTest):
-    FAKE_CREATE_SHOW_NAMESPACE = {
-        "namespace": "OS::Compute::Hypervisor",
-        "visibility": "public",
-        "description": "Tempest",
-        "display_name": u"\u2740(*\xb4\u25e1`*)\u2740",
-        "protected": True
+    FAKE_CREATE_NAMESPACE = {
+        "created_at": "2016-05-19T16:05:48Z",
+        "description": "A metadata definitions namespace.",
+        "display_name": "An Example Namespace",
+        "namespace": "FredCo::SomeCategory::Example",
+        "owner": "c60b1d57c5034e0d86902aedf8c49be0",
+        "protected": True,
+        "schema": "/v2/schemas/metadefs/namespace",
+        "self": "/v2/metadefs/namespaces/"
+                "FredCo::SomeCategory::Example",
+        "updated_at": "2016-05-19T16:05:48Z",
+        "visibility": "public"
+    }
+
+    FAKE_SHOW_NAMESPACE = {
+        "created_at": "2016-06-28T14:57:10Z",
+        "description": "The libvirt compute driver options.",
+        "display_name": "libvirt Driver Options",
+        "namespace": "OS::Compute::Libvirt",
+        "owner": "admin",
+        "properties": {
+            "boot_menu": {
+                "description": "If true, enables the BIOS bootmenu.",
+                "enum": [
+                    "true",
+                    "false"
+                ],
+                "title": "Boot Menu",
+                "type": "string"
+            },
+            "serial_port_count": {
+                "description": "Specifies the count of serial ports.",
+                "minimum": 0,
+                "title": "Serial Port Count",
+                "type": "integer"
+            }
+        },
+        "protected": True,
+        "resource_type_associations": [
+            {
+                "created_at": "2016-06-28T14:57:10Z",
+                "name": "OS::Glance::Image",
+                "prefix": "hw_"
+            },
+            {
+                "created_at": "2016-06-28T14:57:10Z",
+                "name": "OS::Nova::Flavor",
+                "prefix": "hw:"
+            }
+        ],
+        "schema": "/v2/schemas/metadefs/namespace",
+        "self": "/v2/metadefs/namespaces/OS::Compute::Libvirt",
+        "visibility": "public"
     }
 
     FAKE_LIST_NAMESPACES = {
-        "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc",
+        "first": "/v2/metadefs/namespaces?sort_key=created_at&"
+                 "sort_dir=asc",
         "namespaces": [
             {
                 "created_at": "2014-08-28T17:13:06Z",
@@ -89,7 +137,7 @@
         self.check_service_client_function(
             self.client.show_namespace,
             'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_CREATE_SHOW_NAMESPACE,
+            self.FAKE_SHOW_NAMESPACE,
             bytes_body,
             namespace="OS::Compute::Hypervisor")
 
@@ -104,7 +152,7 @@
         self.check_service_client_function(
             self.client.create_namespace,
             'tempest.lib.common.rest_client.RestClient.post',
-            self.FAKE_CREATE_SHOW_NAMESPACE,
+            self.FAKE_CREATE_NAMESPACE,
             bytes_body,
             namespace="OS::Compute::Hypervisor",
             visibility="public", description="Tempest",
diff --git a/tempest/tests/lib/services/image/v2/test_resource_types_client.py b/tempest/tests/lib/services/image/v2/test_resource_types_client.py
index 741b4eb..089e62e 100644
--- a/tempest/tests/lib/services/image/v2/test_resource_types_client.py
+++ b/tempest/tests/lib/services/image/v2/test_resource_types_client.py
@@ -48,6 +48,28 @@
         ]
     }
 
+    FAKE_CREATE_RESOURCE_TYPE_ASSOCIATION = {
+        "created_at": "2020-03-07T18:20:44Z",
+        "name": "OS::Glance::Image",
+        "prefix": "hw:",
+        "updated_at": "2020-03-07T18:20:44Z"
+    }
+
+    FAKE_LIST_RESOURCE_TYPE_ASSOCIATION = {
+        "resource_type_associations": [
+            {
+                "created_at": "2020-03-07T18:20:44Z",
+                "name": "OS::Nova::Flavor",
+                "prefix": "hw:"
+            },
+            {
+                "created_at": "2020-03-07T18:20:44Z",
+                "name": "OS::Glance::Image",
+                "prefix": "hw_"
+            }
+        ]
+    }
+
     def setUp(self):
         super(TestResourceTypesClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -62,8 +84,48 @@
             self.FAKE_LIST_RESOURCETYPES,
             bytes_body)
 
+    def _test_create_resource_type_association(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_resource_type_association,
+            'tempest.lib.common.rest_client.RestClient.post',
+            self.FAKE_CREATE_RESOURCE_TYPE_ASSOCIATION,
+            bytes_body, status=201,
+            namespace_id="OS::Compute::Hypervisor",
+            name="OS::Glance::Image", prefix="hw_",
+            )
+
+    def _test_list_resource_type_association(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_type_association,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_RESOURCE_TYPE_ASSOCIATION,
+            bytes_body,
+            namespace_id="OS::Compute::Hypervisor",
+            )
+
     def test_list_resource_types_with_str_body(self):
         self._test_list_resource_types()
 
     def test_list_resource_types_with_bytes_body(self):
         self._test_list_resource_types(bytes_body=True)
+
+    def test_delete_resource_type_association(self):
+        self.check_service_client_function(
+            self.client.delete_resource_type_association,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {}, status=204,
+            namespace_id="OS::Compute::Hypervisor",
+            resource_name="OS::Glance::Image",
+            )
+
+    def test_create_resource_type_association_with_str_body(self):
+        self._test_create_resource_type_association()
+
+    def test_create_resource_type_association_with_bytes_body(self):
+        self._test_create_resource_type_association(bytes_body=True)
+
+    def test_list_resource_type_association_with_str_body(self):
+        self._test_list_resource_type_association()
+
+    def test_list_resource_type_association_with_bytes_body(self):
+        self._test_list_resource_type_association(bytes_body=True)
diff --git a/tempest/tests/lib/services/image/v2/test_versions_client.py b/tempest/tests/lib/services/image/v2/test_versions_client.py
index 6234b06..98c558a 100644
--- a/tempest/tests/lib/services/image/v2/test_versions_client.py
+++ b/tempest/tests/lib/services/image/v2/test_versions_client.py
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import fixtures
+
 from tempest.lib.services.image.v2 import versions_client
 from tempest.tests.lib import fake_auth_provider
 from tempest.tests.lib.services import base
@@ -92,3 +94,13 @@
 
     def test_list_versions_with_bytes_body(self):
         self._test_list_versions(bytes_body=True)
+
+    def test_has_version(self):
+        mocked_r = self.create_response(self.FAKE_VERSIONS_INFO, False,
+                                        300, None)
+        self.useFixture(fixtures.MockPatch(
+            'tempest.lib.common.rest_client.RestClient.raw_request',
+            return_value=mocked_r))
+
+        self.assertTrue(self.client.has_version('2.1'))
+        self.assertFalse(self.client.has_version('9.9'))
diff --git a/tempest/tests/lib/services/network/test_base_network_client.py b/tempest/tests/lib/services/network/test_base_network_client.py
index e121cec..a426397 100644
--- a/tempest/tests/lib/services/network/test_base_network_client.py
+++ b/tempest/tests/lib/services/network/test_base_network_client.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
 
 from tempest.lib.services.network import base as base_network_client
 from tempest.tests.lib import fake_auth_provider
diff --git a/tempest/tests/lib/services/network/test_floating_ips_client.py b/tempest/tests/lib/services/network/test_floating_ips_client.py
index c5b1845..e8f2e5a 100644
--- a/tempest/tests/lib/services/network/test_floating_ips_client.py
+++ b/tempest/tests/lib/services/network/test_floating_ips_client.py
@@ -27,6 +27,8 @@
             {
                 "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
                 "description": "for test",
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myfip",
                 "created_at": "2016-12-21T10:55:50Z",
                 "updated_at": "2016-12-21T10:55:53Z",
                 "revision_number": 1,
@@ -37,11 +39,24 @@
                 "floating_ip_address": "172.24.4.228",
                 "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
                 "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
-                "status": "ACTIVE"
+                "status": "ACTIVE",
+                "port_details": {
+                    "status": "ACTIVE",
+                    "name": "",
+                    "admin_state_up": True,
+                    "network_id": "02dd8479-ef26-4398-a102-d19d0a7b3a1f",
+                    "device_owner": "compute:nova",
+                    "mac_address": "fa:16:3e:b1:3b:30",
+                    "device_id": "8e3941b4-a6e9-499f-a1ac-2a4662025cba"
+                },
+                "tags": ["tag1,tag2"],
+                "port_forwardings": []
             },
             {
                 "router_id": None,
                 "description": "for test",
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myfip2",
                 "created_at": "2016-12-21T11:55:50Z",
                 "updated_at": "2016-12-21T11:55:53Z",
                 "revision_number": 2,
@@ -52,7 +67,10 @@
                 "floating_ip_address": "172.24.4.227",
                 "port_id": None,
                 "id": "61cea855-49cb-4846-997d-801b70c71bdd",
-                "status": "DOWN"
+                "status": "DOWN",
+                "port_details": None,
+                "tags": ["tag1,tag2"],
+                "port_forwardings": []
             }
         ]
     }
diff --git a/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py b/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py
new file mode 100644
index 0000000..ce068e9
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py
@@ -0,0 +1,156 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import floating_ips_port_forwarding_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestFloatingIpsPortForwardingClient(base.BaseServiceTest):
+
+    FAKE_PORT_FORWARDING_REQUEST = {
+
+        "port_forwarding": {
+            "protocol": "tcp",
+            "internal_ip_address": "10.0.0.11",
+            "internal_port": 25,
+            "internal_port_id": "1238be08-a2a8-4b8d-addf-fb5e2250e480",
+            "external_port": 2230,
+            "description": "Some description",
+        }
+
+    }
+
+    FAKE_PORT_FORWARDING_RESPONSE = {
+
+        "port_forwarding": {
+            "protocol": "tcp",
+            "internal_ip_address": "10.0.0.12",
+            "internal_port": 26,
+            "internal_port_id": "1238be08-a2a8-4b8d-addf-fb5e2250e480",
+            "external_port": 2130,
+            "description": "Some description",
+            "id": "825ade3c-9760-4880-8080-8fc2dbab9acc"
+        }
+    }
+
+    FAKE_PORT_FORWARDINGS = {
+        "port_forwardings": [
+            FAKE_PORT_FORWARDING_RESPONSE['port_forwarding']
+        ]
+    }
+
+    FAKE_FLOATINGIP_ID = "a6800594-5b7a-4105-8bfe-723b346ce866"
+
+    FAKE_PORT_FORWARDING_ID = "a7800594-5b7a-4105-8bfe-723b346ce866"
+
+    def setUp(self):
+        super(TestFloatingIpsPortForwardingClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.floating_ips_port_forwarding_client = \
+            floating_ips_port_forwarding_client.\
+            FloatingIpsPortForwardingClient(fake_auth,
+                                            "network",
+                                            "regionOne")
+
+    def _test_create_port_forwarding(self, bytes_body=False):
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.
+            create_port_forwarding,
+            "tempest.lib.common.rest_client.RestClient.post",
+            self.FAKE_PORT_FORWARDING_RESPONSE,
+            bytes_body,
+            201,
+            floatingip_id=self.FAKE_FLOATINGIP_ID,
+            **self.FAKE_PORT_FORWARDING_REQUEST)
+
+    def _test_list_port_forwardings(self, bytes_body=False):
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.
+            list_port_forwardings,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_PORT_FORWARDINGS,
+            bytes_body,
+            200,
+            floatingip_id=self.FAKE_FLOATINGIP_ID)
+
+    def _test_show_port_forwardings(self, bytes_body=False):
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.
+            show_port_forwarding,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_PORT_FORWARDING_RESPONSE,
+            bytes_body,
+            200,
+            floatingip_id=self.FAKE_FLOATINGIP_ID,
+            port_forwarding_id=self.FAKE_PORT_FORWARDING_ID)
+
+    def _test_delete_port_forwarding(self):
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.
+            delete_port_forwarding,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            floatingip_id=self.FAKE_FLOATINGIP_ID,
+            port_forwarding_id=self.FAKE_PORT_FORWARDING_ID)
+
+    def _test_update_port_forwarding(self, bytes_body=False):
+        update_kwargs = {
+            "internal_port": "27"
+        }
+
+        resp_body = {
+            "port_forwarding": copy.deepcopy(
+                self.FAKE_PORT_FORWARDING_RESPONSE['port_forwarding']
+            )
+        }
+        resp_body["port_forwarding"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.floating_ips_port_forwarding_client.update_port_forwarding,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            floatingip_id=self.FAKE_FLOATINGIP_ID,
+            port_forwarding_id=self.FAKE_PORT_FORWARDING_ID,
+            **update_kwargs)
+
+    def test_list_port_forwardings_with_str_body(self):
+        self._test_list_port_forwardings()
+
+    def test_list_port_forwardings_with_bytes_body(self):
+        self._test_list_port_forwardings(bytes_body=True)
+
+    def test_show_port_forwardings_with_str_body(self):
+        self._test_show_port_forwardings()
+
+    def test_show_port_forwardings_with_bytes_body(self):
+        self._test_show_port_forwardings(bytes_body=True)
+
+    def test_create_port_forwarding_with_str_body(self):
+        self._test_create_port_forwarding()
+
+    def test_create_port_forwarding_with_bytes_body(self):
+        self._test_create_port_forwarding(bytes_body=True)
+
+    def test_update_port_forwarding_with_str_body(self):
+        self._test_update_port_forwarding()
+
+    def test_update_port_forwarding_with_bytes_body(self):
+        self._test_update_port_forwarding(bytes_body=True)
+
+    def test_delete_port_forwarding(self):
+        self._test_delete_port_forwarding()
diff --git a/tempest/tests/lib/services/network/test_log_resource_client.py b/tempest/tests/lib/services/network/test_log_resource_client.py
new file mode 100644
index 0000000..ef502bc
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_log_resource_client.py
@@ -0,0 +1,145 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import log_resource_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLogResourceClient(base.BaseServiceTest):
+
+    FAKE_LOGS = {
+        "logs": [
+            {
+                "name": "security group log1",
+                "description": "Log for test demo.",
+                "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
+                "project_id": "92a5a4f4245a4abbafacb7ca73b027b0",
+                "tenant_id": "92a5a4f4245a4abbafacb7ca73b027b0",
+                "created_at": "2018-04-03T21:03:04Z",
+                "updated_at": "2018-04-03T21:03:04Z",
+                "enabled": True,
+                "revision_number": 1,
+                "resource_type": "security_group",
+                "resource_id": None,
+                "target_id": None,
+                "event": "ALL"
+            },
+            {
+                "name": "security group log2",
+                "description": "Log for test demo.",
+                "id": "46ebaec1-0570-43ac-82f6-60d2b03168c4",
+                "project_id": "82a5a4f4245a4abbafacb7ca73b027b0",
+                "tenant_id": "82a5a4f4245a4abbafacb7ca73b027b0",
+                "created_at": "2018-04-03T21:04:04Z",
+                "updated_at": "2018-04-03T21:04:04Z",
+                "enabled": True,
+                "revision_number": 2,
+                "resource_type": "security_group",
+                "resource_id": None,
+                "target_id": None,
+                "event": "ALL"
+            }
+        ]
+    }
+
+    FAKE_LOG_ID = "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+
+    def setUp(self):
+        super(TestLogResourceClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.log_resource_client = log_resource_client.LogResourceClient(
+            fake_auth, "network", "regionOne")
+
+    def _test_list_logs(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.list_logs,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_LOGS,
+            bytes_body,
+            200)
+
+    def _test_show_log(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.show_log,
+            "tempest.lib.common.rest_client.RestClient.get",
+            {"log": self.FAKE_LOGS["logs"][0]},
+            bytes_body,
+            200,
+            log_id=self.FAKE_LOG_ID)
+
+    def _test_create_log(self, bytes_body=False):
+        self.check_service_client_function(
+            self.log_resource_client.create_log,
+            "tempest.lib.common.rest_client.RestClient.post",
+            {"logs": self.FAKE_LOGS["logs"][1]},
+            bytes_body,
+            201,
+            log_id="2f245a7b-796b-4f26-9cf9-9e82d248fda7")
+
+    def _test_update_log(self, bytes_body=False):
+        update_kwargs = {
+            "tenant_id": "83a5a4f4245a4abbafacb7ca73b027b0"
+        }
+
+        resp_body = {
+            "logs": copy.deepcopy(
+                self.FAKE_LOGS["logs"][0]
+            )
+        }
+        resp_body["logs"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.log_resource_client.update_log,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            log_id=self.FAKE_LOG_ID,
+            **update_kwargs)
+
+    def test_list_logs_with_str_body(self):
+        self._test_list_logs()
+
+    def test_list_logs_with_bytes_body(self):
+        self._test_list_logs(bytes_body=True)
+
+    def test_create_log_with_str_body(self):
+        self._test_create_log()
+
+    def test_create_log_with_bytes_body(self):
+        self._test_create_log(bytes_body=True)
+
+    def test_show_log_with_str_body(self):
+        self._test_show_log()
+
+    def test_show_log_with_bytes_body(self):
+        self._test_show_log(bytes_body=True)
+
+    def test_update_log_with_str_body(self):
+        self._test_update_log()
+
+    def test_update_log_with_bytes_body(self):
+        self._test_update_log(bytes_body=True)
+
+    def test_delete_log(self):
+        self.check_service_client_function(
+            self.log_resource_client.delete_log,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            status=204,
+            log_id=self.FAKE_LOG_ID)
diff --git a/tempest/tests/lib/services/network/test_loggable_resource_client.py b/tempest/tests/lib/services/network/test_loggable_resource_client.py
new file mode 100644
index 0000000..232775b
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_loggable_resource_client.py
@@ -0,0 +1,53 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import loggable_resource_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestLoggableResourceClient(base.BaseServiceTest):
+
+    FAKE_LOGS = {
+        "loggable_resources": [
+            {
+                "type": "security_group"
+            },
+            {
+                "type": "none"
+            }
+        ]
+    }
+
+    def setUp(self):
+        super(TestLoggableResourceClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.loggable_resource_client = \
+            loggable_resource_client.LoggableResourceClient(
+                fake_auth, "network", "regionOne")
+
+    def _test_list_loggable_resources(self, bytes_body=False):
+        self.check_service_client_function(
+            self.loggable_resource_client.list_loggable_resources,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_LOGS,
+            bytes_body,
+            200)
+
+    def test_list_loggable_resources_with_str_body(self):
+        self._test_list_loggable_resources()
+
+    def test_list_loggable_resources_with_bytes_body(self):
+        self._test_list_loggable_resources(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_networks_client.py b/tempest/tests/lib/services/network/test_networks_client.py
index 078f4b0..17233bc 100644
--- a/tempest/tests/lib/services/network/test_networks_client.py
+++ b/tempest/tests/lib/services/network/test_networks_client.py
@@ -31,12 +31,17 @@
                     "nova"
                 ],
                 "created_at": "2016-03-08T20:19:41",
+                "dns_domain": "my-domain.org.",
                 "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+                "ipv4_address_scope": None,
+                "ipv6_address_scope": None,
+                "l2_adjacency": False,
                 "mtu": 0,
                 "name": "net1",
                 "port_security_enabled": True,
                 "project_id": "4fd44f30292945e481c7b8a0c8908869",
                 "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+                "revision_number": 1,
                 "router:external": False,
                 "shared": False,
                 "status": "ACTIVE",
@@ -46,7 +51,8 @@
                 "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
                 "updated_at": "2016-03-08T20:19:41",
                 "vlan_transparent": True,
-                "description": ""
+                "description": "",
+                "is_default": False
             },
             {
                 "admin_state_up": True,
@@ -54,12 +60,18 @@
                 "availability_zones": [
                     "nova"
                 ],
+                "created_at": "2016-03-08T20:19:41",
+                "dns_domain": "my-domain.org.",
                 "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+                "ipv4_address_scope": None,
+                "ipv6_address_scope": None,
+                "l2_adjacency": False,
                 "mtu": 0,
                 "name": "net2",
                 "port_security_enabled": True,
                 "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                 "qos_policy_id": "bfdb6c39f71e4d44b1dfbda245c50819",
+                "revision_number": 3,
                 "router:external": False,
                 "shared": False,
                 "status": "ACTIVE",
@@ -69,7 +81,8 @@
                 "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                 "updated_at": "2016-03-08T20:19:41",
                 "vlan_transparent": False,
-                "description": ""
+                "description": "",
+                "is_default": False
             }
         ]
     }
@@ -108,6 +121,7 @@
                 "alive": True,
                 "topic": "dhcp_agent",
                 "host": "osboxes",
+                "ha_state": None,
                 "agent_type": "DHCP agent",
                 "resource_versions": {},
                 "created_at": "2017-06-19 21:39:51",
diff --git a/tempest/tests/lib/services/network/test_ports_client.py b/tempest/tests/lib/services/network/test_ports_client.py
index 20ef3f1..9ca9ac6 100644
--- a/tempest/tests/lib/services/network/test_ports_client.py
+++ b/tempest/tests/lib/services/network/test_ports_client.py
@@ -22,53 +22,126 @@
 
 class TestPortsClient(base.BaseServiceTest):
 
+    FAKE_CREATE_PORTS = {
+        "port": {
+            "binding:host_id": "4df8d9ff-6f6f-438f-90a1-ef660d4586ad",
+            "binding:profile": {
+                "local_link_information": [
+                    {
+                        "port_id": "Ethernet3/1",
+                        "switch_id": "0a:1b:2c:3d:4e:5f",
+                        "switch_info": "switch1"
+                    }
+                ]
+            },
+            "binding:vnic_type": "baremetal",
+            "device_id": "d90a13da-be41-461f-9f99-1dbcf438fdf2",
+            "device_owner": "baremetal:none",
+            "dns_domain": "my-domain.org.",
+            "dns_name": "myport",
+            "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+            "uplink_status_propagation": False
+        }
+    }
+
     FAKE_PORTS = {
         "ports": [
             {
                 "admin_state_up": True,
                 "allowed_address_pairs": [],
+                "created_at": "2016-03-08T20:19:41",
                 "data_plane_status": None,
                 "description": "",
                 "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
                 "device_owner": "network:router_gateway",
-                "extra_dhcp_opts": [],
+                "dns_assignment": [
+                    {
+                        "hostname": "myport",
+                        "ip_address": "172.24.4.2",
+                        "fqdn": "myport.my-domain.org"
+                    }
+                ],
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myport",
+                "extra_dhcp_opts": [
+                    {
+                        "opt_value": "pxelinux.0",
+                        "ip_version": 4,
+                        "opt_name": "bootfile-name"
+                    }
+                ],
                 "fixed_ips": [
                     {
                         "ip_address": "172.24.4.2",
-                        "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062"
+                        "subnet_id":
+                            "008ba151-0b8c-4a67-98b5-0d2b87666062"
                     }
                 ],
                 "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b",
+                "ip_allocation": "immediate",
                 "mac_address": "fa:16:3e:58:42:ed",
                 "name": "",
                 "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3",
                 "project_id": "",
+                "revision_number": 1,
                 "security_groups": [],
                 "status": "ACTIVE",
-                "tenant_id": ""
+                "tags": ["tag1,tag2"],
+                "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa",
+                "updated_at": "2016-03-08T20:19:41",
+                "qos_network_policy_id":
+                    "174dd0c1-a4eb-49d4-a807-ae80246d82f4",
+                "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+                "port_security_enabled": False,
+                "uplink_status_propagation": False
             },
             {
                 "admin_state_up": True,
                 "allowed_address_pairs": [],
+                "created_at": "2016-03-08T20:19:41",
                 "data_plane_status": None,
                 "description": "",
                 "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
                 "device_owner": "network:router_interface",
-                "extra_dhcp_opts": [],
+                "dns_assignment": [
+                    {
+                        "hostname": "myport2",
+                        "ip_address": "10.0.0.1",
+                        "fqdn": "myport2.my-domain.org"
+                    }
+                ],
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myport2",
+                "extra_dhcp_opts": [
+                    {
+                        "opt_value": "pxelinux.0",
+                        "ip_version": 4,
+                        "opt_name": "bootfile-name"
+                    }
+                ],
                 "fixed_ips": [
                     {
                         "ip_address": "10.0.0.1",
-                        "subnet_id": "288bf4a1-51ba-43b6-9d0a-520e9005db17"
+                        "subnet_id":
+                            "288bf4a1-51ba-43b6-9d0a-520e9005db17"
                     }
                 ],
                 "id": "f71a6703-d6de-4be1-a91a-a570ede1d159",
+                "ip_allocation": "immediate",
                 "mac_address": "fa:16:3e:bb:3c:e4",
                 "name": "",
                 "network_id": "f27aa545-cbdd-4907-b0c6-c9e8b039dcc2",
                 "project_id": "d397de8a63f341818f198abb0966f6f3",
+                "revision_number": 1,
                 "security_groups": [],
                 "status": "ACTIVE",
-                "tenant_id": "d397de8a63f341818f198abb0966f6f3"
+                "tags": ["tag1,tag2"],
+                "tenant_id": "d397de8a63f341818f198abb0966f6f3",
+                "updated_at": "2016-03-08T20:19:41",
+                "qos_network_policy_id": None,
+                "qos_policy_id": None,
+                "port_security_enabled": False,
+                "uplink_status_propagation": False
             }
         ]
     }
@@ -112,7 +185,7 @@
         self.check_service_client_function(
             self.ports_client.create_port,
             "tempest.lib.common.rest_client.RestClient.post",
-            {"port": self.FAKE_PORTS["ports"][0]},
+            self.FAKE_CREATE_PORTS,
             bytes_body,
             201,
             **self.FAKE_PORT1)
diff --git a/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py b/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py
new file mode 100644
index 0000000..e83792d
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_limit_bandwidth_rules_client.py
@@ -0,0 +1,124 @@
+# Copyright 2021 Red Hat.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib import decorators
+
+from tempest.lib.services.network import qos_limit_bandwidth_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestQosLimitBandwidthRulesClient(base.BaseServiceTest):
+
+    FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+    FAKE_MAX_BW_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
+
+    FAKE_MAX_BW_RULE_REQUEST = {
+        'qos_policy_id': FAKE_QOS_POLICY_ID,
+        'max_kbps': 1000,
+        'max_burst_kbps': 0,
+        'direction': 'ingress'
+    }
+
+    FAKE_MAX_BW_RULE_RESPONSE = {
+        'bandwidth_limit_rule': {
+            'id': FAKE_MAX_BW_RULE_ID,
+            'max_kbps': 10000,
+            'max_burst_kbps': 0,
+            'direction': 'ingress'
+        }
+    }
+
+    FAKE_MAX_BW_RULES = {
+        'bandwidth_limit_rules': [
+            FAKE_MAX_BW_RULE_RESPONSE['bandwidth_limit_rule']
+        ]
+    }
+
+    def setUp(self):
+        super(TestQosLimitBandwidthRulesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.qos_limit_bw_client = qos_limit_bandwidth_rules_client.\
+            QosLimitBandwidthRulesClient(fake_auth, "network", "regionOne")
+
+    @decorators.idempotent_id('cde981fa-e93b-11eb-aacb-74e5f9e2a801')
+    def test_create_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.create_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.post",
+            self.FAKE_MAX_BW_RULE_RESPONSE,
+            bytes_body,
+            201,
+            **self.FAKE_MAX_BW_RULE_REQUEST
+        )
+
+    @decorators.idempotent_id('86e6803a-e974-11eb-aacb-74e5f9e2a801')
+    def test_update_limit_bandwidth_rules(self, bytes_body=False):
+        update_kwargs = {
+            "max_kbps": "20000"
+        }
+
+        resp_body = {
+            "bandwidth_limit_rule": copy.deepcopy(
+                self.FAKE_MAX_BW_RULE_RESPONSE['bandwidth_limit_rule']
+            )
+        }
+        resp_body["bandwidth_limit_rule"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.qos_limit_bw_client.update_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID,
+            **update_kwargs)
+
+    @decorators.idempotent_id('be60ae6e-e979-11eb-aacb-74e5f9e2a801')
+    def test_show_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.show_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MAX_BW_RULE_RESPONSE,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID
+        )
+
+    @decorators.idempotent_id('0a7c0964-e97b-11eb-aacb-74e5f9e2a801')
+    def test_delete_limit_bandwidth_rule(self):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.delete_limit_bandwidth_rule,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MAX_BW_RULE_ID)
+
+    @decorators.idempotent_id('08df88ae-e97d-11eb-aacb-74e5f9e2a801')
+    def test_list_limit_bandwidth_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_limit_bw_client.list_limit_bandwidth_rules,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MAX_BW_RULES,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID
+        )
diff --git a/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py b/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py
new file mode 100644
index 0000000..3cc3de3
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_qos_minimum_packet_rate_rules_client.py
@@ -0,0 +1,135 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import qos_minimum_packet_rate_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestQosMinimumPacketRateRulesClient(base.BaseServiceTest):
+
+    FAKE_QOS_POLICY_ID = "f1011b08-1297-11e9-a1e7-c7e6825a2616"
+    FAKE_MIN_PPS_RULE_ID = "e758c89e-1297-11e9-a6cf-cf46a71e6699"
+
+    FAKE_MIN_PPS_RULE_REQUEST = {
+        'qos_policy_id': FAKE_QOS_POLICY_ID,
+        'min_kpps': 1000,
+        'direction': 'ingress'
+    }
+
+    FAKE_MIN_PPS_RULE_RESPONSE = {
+        'minimum_packet_rate_rule': {
+            'id': FAKE_MIN_PPS_RULE_ID,
+            'min_kpps': 1000,
+            'direction': 'ingress'
+        }
+    }
+
+    FAKE_MIN_PPS_RULES = {
+        'minimum_packet_rate_rules': [
+            FAKE_MIN_PPS_RULE_RESPONSE['minimum_packet_rate_rule']
+        ]
+    }
+
+    def setUp(self):
+        super(TestQosMinimumPacketRateRulesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.qos_min_pps_client = qos_minimum_packet_rate_rules_client.\
+            QosMinimumPacketRateRulesClient(fake_auth, "network", "regionOne")
+
+    def _test_create_minimum_packet_rate_rule(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.create_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.post",
+            self.FAKE_MIN_PPS_RULE_RESPONSE,
+            bytes_body,
+            201,
+            **self.FAKE_MIN_PPS_RULE_REQUEST
+        )
+
+    def _test_list_minimum_packet_rate_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.list_minimum_packet_rate_rules,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MIN_PPS_RULES,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID
+        )
+
+    def _test_show_minimum_packet_rate_rule(self, bytes_body=False):
+        self.check_service_client_function(
+            self.qos_min_pps_client.show_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_MIN_PPS_RULE_RESPONSE,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID
+        )
+
+    def _test_update_minimum_packet_rate_rule(self, bytes_body=False):
+        update_kwargs = {
+            "min_kpps": "20000"
+        }
+
+        resp_body = {
+            "minimum_packet_rate_rule": copy.deepcopy(
+                self.FAKE_MIN_PPS_RULE_RESPONSE['minimum_packet_rate_rule']
+            )
+        }
+        resp_body["minimum_packet_rate_rule"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.qos_min_pps_client.update_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID,
+            **update_kwargs)
+
+    def test_create_minimum_packet_rate_rule_with_str_body(self):
+        self._test_create_minimum_packet_rate_rule()
+
+    def test_create_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_create_minimum_packet_rate_rule(bytes_body=True)
+
+    def test_update_minimum_packet_rate_rule_with_str_body(self):
+        self._test_update_minimum_packet_rate_rule()
+
+    def test_update_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_update_minimum_packet_rate_rule(bytes_body=True)
+
+    def test_show_minimum_packet_rate_rule_with_str_body(self):
+        self._test_show_minimum_packet_rate_rule()
+
+    def test_show_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_show_minimum_packet_rate_rule(bytes_body=True)
+
+    def test_delete_minimum_packet_rate_rule(self):
+        self.check_service_client_function(
+            self.qos_min_pps_client.delete_minimum_packet_rate_rule,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            qos_policy_id=self.FAKE_QOS_POLICY_ID,
+            rule_id=self.FAKE_MIN_PPS_RULE_ID)
+
+    def test_list_minimum_packet_rate_rule_with_str_body(self):
+        self._test_list_minimum_packet_rate_rules()
+
+    def test_list_minimum_packet_rate_rule_with_bytes_body(self):
+        self._test_list_minimum_packet_rate_rules(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_quotas_client.py b/tempest/tests/lib/services/network/test_quotas_client.py
index aa6c1a1..7dce4e1 100644
--- a/tempest/tests/lib/services/network/test_quotas_client.py
+++ b/tempest/tests/lib/services/network/test_quotas_client.py
@@ -52,7 +52,7 @@
         }
     }
 
-    FAKE_QUOTA_TENANT_ID = "bab7d5c60cd041a0a36f7c4b6e1dd978"
+    FAKE_QUOTA_PROJECT_ID = "bab7d5c60cd041a0a36f7c4b6e1dd978"
 
     FAKE_QUOTA_DETAILS = {
         "quota": {
@@ -115,7 +115,7 @@
             self.FAKE_PROJECT_QUOTAS,
             bytes_body,
             200,
-            tenant_id=self.FAKE_QUOTA_TENANT_ID)
+            project_id=self.FAKE_QUOTA_PROJECT_ID)
 
     def _test_show_default_quotas(self, bytes_body=False):
         self.check_service_client_function(
@@ -124,7 +124,7 @@
             self.FAKE_PROJECT_QUOTAS,
             bytes_body,
             200,
-            tenant_id=self.FAKE_QUOTA_TENANT_ID)
+            project_id=self.FAKE_QUOTA_PROJECT_ID)
 
     def _test_update_quotas(self, bytes_body=False):
         self.check_service_client_function(
@@ -133,7 +133,7 @@
             self.FAKE_PROJECT_QUOTAS,
             bytes_body,
             200,
-            tenant_id=self.FAKE_QUOTA_TENANT_ID)
+            project_id=self.FAKE_QUOTA_PROJECT_ID)
 
     def _test_show_quota_details(self, bytes_body=False):
         self.check_service_client_function(
@@ -142,7 +142,7 @@
             self.FAKE_QUOTA_DETAILS,
             bytes_body,
             200,
-            tenant_id=self.FAKE_QUOTA_TENANT_ID)
+            project_id=self.FAKE_QUOTA_PROJECT_ID)
 
     def test_reset_quotas(self):
         self.check_service_client_function(
@@ -150,7 +150,7 @@
             "tempest.lib.common.rest_client.RestClient.delete",
             {},
             status=204,
-            tenant_id=self.FAKE_QUOTA_TENANT_ID)
+            project_id=self.FAKE_QUOTA_PROJECT_ID)
 
     def test_list_quotas_with_str_body(self):
         self._test_list_quotas()
diff --git a/tempest/tests/lib/services/network/test_security_group_rules_client.py b/tempest/tests/lib/services/network/test_security_group_rules_client.py
index b9c17a1..2ecc996 100644
--- a/tempest/tests/lib/services/network/test_security_group_rules_client.py
+++ b/tempest/tests/lib/services/network/test_security_group_rules_client.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 import copy
+from unittest import mock
 
-import mock
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.services.network import base as network_base
diff --git a/tempest/tests/lib/services/network/test_security_groups_client.py b/tempest/tests/lib/services/network/test_security_groups_client.py
index f96805f..501883b 100644
--- a/tempest/tests/lib/services/network/test_security_groups_client.py
+++ b/tempest/tests/lib/services/network/test_security_groups_client.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 import copy
+from unittest import mock
 
-import mock
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.services.network import base as network_base
diff --git a/tempest/tests/lib/services/network/test_trunks_client.py b/tempest/tests/lib/services/network/test_trunks_client.py
new file mode 100644
index 0000000..b637d5e
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_trunks_client.py
@@ -0,0 +1,201 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import trunks_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestTrunksClient(base.BaseServiceTest):
+
+    FAKE_TRUNK_ID = "dfbc2103-93cf-4edf-952a-ef6deb32ddc6"
+    FAKE_PORT_ID = "1f04eb36-6c84-11eb-b0ab-4fc62961629d"
+    FAKE_TRUNKS = {
+        "trunks": [
+            {
+                "admin_state_up": True,
+                "description": "",
+                "id": "dfbc2103-93cf-4edf-952a-ef6deb32ddc6",
+                "name": "trunk0",
+                "port_id": "00130aab-bb51-42a1-a7c4-6703a3a43aa5",
+                "project_id": "",
+                "revision_number": 2,
+                "status": "DOWN",
+                "sub_ports": [
+                    {
+                        "port_id": "87d2483d-e5e6-483d-b5f0-81b9ed1d1a91",
+                        "segmentation_id": 101,
+                        "segmentation_type": "vlan"
+                    }
+                ],
+                "tags": [],
+            },
+            {
+                "admin_state_up": True,
+                "description": "",
+                "id": "9eb0e72e-11d3-4295-bcaf-6c89008d9f0a",
+                "name": "trunk1",
+                "port_id": "035a12bf-2ae3-42ae-8ad6-9f70640cddde",
+                "project_id": "",
+                "revision_number": 2,
+                "status": "DOWN",
+                "sub_ports": [
+                    {
+                        "port_id": "cba839d5-02e2-4e09-b964-81356da78165",
+                        "segmentation_id": 102,
+                        "segmentation_type": "vlan"
+                    }
+                ],
+                "tags": [],
+            },
+        ]
+    }
+
+    FAKE_TRUNK_1 = {
+        "name": "trunk0",
+        "port_id": "00130aab-bb51-42a1-a7c4-6703a3a43aa5"
+    }
+
+    def setUp(self):
+        super(TestTrunksClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.trunks_client = trunks_client.TrunksClient(
+            fake_auth, "network", "regionOne")
+
+    def _test_create_trunk(self, bytes_body=False):
+        self.check_service_client_function(
+            self.trunks_client.create_trunk,
+            "tempest.lib.common.rest_client.RestClient.post",
+            {"trunk": self.FAKE_TRUNKS["trunks"][0]},
+            bytes_body,
+            201,
+            **self.FAKE_TRUNK_1)
+
+    def _test_list_trunks(self, bytes_body=False):
+        self.check_service_client_function(
+            self.trunks_client.list_trunks,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_TRUNKS,
+            bytes_body,
+            200)
+
+    def _test_show_trunk(self, bytes_body=False):
+        self.check_service_client_function(
+            self.trunks_client.show_trunk,
+            "tempest.lib.common.rest_client.RestClient.get",
+            {"trunk": self.FAKE_TRUNKS["trunks"][0]},
+            bytes_body,
+            200,
+            trunk_id=self.FAKE_TRUNK_ID)
+
+    def _test_update_trunk(self, bytes_body=False):
+        update_kwargs = {
+            "admin_state_up": True,
+            "name": "new_trunk"
+        }
+
+        resp_body = {
+            "trunk": copy.deepcopy(
+                self.FAKE_TRUNKS["trunks"][0]
+            )
+        }
+        resp_body["trunk"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.trunks_client.update_trunk,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            trunk_id=self.FAKE_TRUNK_ID,
+            **update_kwargs)
+
+    def _test_add_subports_to_trunk(self, bytes_body=False):
+        sub_ports = [{
+            "port_id": self.FAKE_PORT_ID,
+            "segmentation_type": "vlan",
+            "segmentation_id": "1001"
+        }]
+        resp_body = copy.deepcopy(self.FAKE_TRUNKS["trunks"][0])
+
+        resp_body["sub_ports"].extend(sub_ports)
+        self.check_service_client_function(
+            self.trunks_client.add_subports_to_trunk,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            trunk_id=self.FAKE_TRUNK_ID,
+            sub_ports=sub_ports)
+
+    def _test_delete_subports_from_trunk(self, bytes_body=False):
+        fake_sub_ports = self.FAKE_TRUNKS['trunks'][0]['sub_ports']
+        sub_ports = [
+            {"port_id": fake_sub_ports[0]['port_id']}
+        ]
+        resp_body = copy.deepcopy(self.FAKE_TRUNKS["trunks"][0])
+
+        resp_body['sub_ports'] = []
+        self.check_service_client_function(
+            self.trunks_client.delete_subports_from_trunk,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            trunk_id=self.FAKE_TRUNK_ID,
+            sub_ports=sub_ports)
+
+    def test_create_trunk_with_str_body(self):
+        self._test_create_trunk()
+
+    def test_create_trunk_with_bytes_body(self):
+        self._test_create_trunk(bytes_body=True)
+
+    def test_list_trunks_with_str_body(self):
+        self._test_list_trunks()
+
+    def test_list_trunks_with_bytes_body(self):
+        self._test_list_trunks(bytes_body=True)
+
+    def test_show_trunk_with_str_body(self):
+        self._test_show_trunk()
+
+    def test_show_trunk_with_bytes_body(self):
+        self._test_show_trunk(bytes_body=True)
+
+    def test_update_trunk_with_str_body(self):
+        self._test_update_trunk()
+
+    def test_update_trunk_with_bytes_body(self):
+        self._test_update_trunk(bytes_body=True)
+
+    def test_add_subports_to_trunk_str_body(self):
+        self._test_add_subports_to_trunk()
+
+    def test_add_subports_to_trunk_bytes_body(self):
+        self._test_add_subports_to_trunk(bytes_body=True)
+
+    def test_delete_subports_from_trunk_str_body(self):
+        self._test_delete_subports_from_trunk()
+
+    def test_delete_subports_from_trunk_bytes_body(self):
+        self._test_delete_subports_from_trunk(bytes_body=True)
+
+    def test_delete_trunk(self):
+        self.check_service_client_function(
+            self.trunks_client.delete_trunk,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            trunk_id=self.FAKE_TRUNK_ID)
diff --git a/tempest/tests/lib/services/object_storage/test_object_client.py b/tempest/tests/lib/services/object_storage/test_object_client.py
index a16d1d7..d6df243 100644
--- a/tempest/tests/lib/services/object_storage/test_object_client.py
+++ b/tempest/tests/lib/services/object_storage/test_object_client.py
@@ -14,7 +14,7 @@
 #    under the License.
 
 
-import mock
+from unittest import mock
 
 from tempest.lib import exceptions
 from tempest.lib.services.object_storage import object_client
@@ -31,15 +31,18 @@
         self.object_client = object_client.ObjectClient(self.fake_auth,
                                                         'swift', 'region1')
 
-    @mock.patch.object(object_client, '_create_connection')
+    @mock.patch('tempest.lib.services.object_storage.object_client.'
+                'ObjectClient._create_connection')
     def test_create_object_continue_no_data(self, mock_poc):
         self._validate_create_object_continue(None, mock_poc)
 
-    @mock.patch.object(object_client, '_create_connection')
+    @mock.patch('tempest.lib.services.object_storage.object_client.'
+                'ObjectClient._create_connection')
     def test_create_object_continue_with_data(self, mock_poc):
         self._validate_create_object_continue('hello', mock_poc)
 
-    @mock.patch.object(object_client, '_create_connection')
+    @mock.patch('tempest.lib.services.object_storage.object_client.'
+                'ObjectClient._create_connection')
     def test_create_continue_with_no_continue_received(self, mock_poc):
         self._validate_create_object_continue('hello', mock_poc,
                                               initial_status=201)
@@ -69,7 +72,7 @@
 
         # If the expected initial status is not 100, then an exception
         # should be thrown and the connection closed
-        if initial_status is 100:
+        if initial_status == 100:
             status, reason = \
                 self.object_client.create_object_continue(cnt, obj, req_data)
         else:
@@ -91,7 +94,7 @@
         mock_poc.return_value.endheaders.assert_called_once_with()
 
         # The following steps are only taken if the initial status is 100
-        if initial_status is 100:
+        if initial_status == 100:
             # Verify that the method returned what it was supposed to
             self.assertEqual(status, 201)
 
diff --git a/tempest/tests/lib/services/placement/test_resource_providers_client.py b/tempest/tests/lib/services/placement/test_resource_providers_client.py
new file mode 100644
index 0000000..2871395
--- /dev/null
+++ b/tempest/tests/lib/services/placement/test_resource_providers_client.py
@@ -0,0 +1,206 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.placement import resource_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestResourceProvidersClient(base.BaseServiceTest):
+    FAKE_RESOURCE_PROVIDER_UUID = '3722a86e-a563-11e9-9abb-c3d41b6d3abf'
+    FAKE_ROOT_PROVIDER_UUID = '4a6a57c8-a563-11e9-914e-f3e0478fce53'
+    FAKE_RESOURCE_PROVIDER = {
+        'generation': 0,
+        'name': 'Ceph Storage Pool',
+        'uuid': FAKE_RESOURCE_PROVIDER_UUID,
+        'parent_provider_uuid': FAKE_ROOT_PROVIDER_UUID,
+        'root_provider_uuid': FAKE_ROOT_PROVIDER_UUID
+    }
+
+    FAKE_RESOURCE_PROVIDERS = {
+        'resource_providers': [FAKE_RESOURCE_PROVIDER]
+    }
+
+    FAKE_RESOURCE_PROVIDER_INVENTORIES = {
+        'inventories': {
+            'DISK_GB': {
+                'allocation_ratio': 1.0,
+                'max_unit': 35,
+                'min_unit': 1,
+                'reserved': 0,
+                'step_size': 1,
+                'total': 35
+            }
+        },
+        'resource_provider_generation': 7
+    }
+
+    FAKE_AGGREGATE_UUID = '1166be40-a567-11e9-9f2a-53827f9311fa'
+    FAKE_RESOURCE_PROVIDER_AGGREGATES = {
+        'aggregates': [FAKE_AGGREGATE_UUID]
+    }
+    FAKE_RESOURCE_UPDATE_INVENTORIES_RESPONSE = {
+        "inventories": {
+            "MEMORY_MB": {
+                "allocation_ratio": 2.0,
+                "max_unit": 16,
+                "min_unit": 1,
+                "reserved": 0,
+                "step_size": 4,
+                "total": 128
+            },
+            "VCPU": {
+                "allocation_ratio": 10.0,
+                "max_unit": 2147483647,
+                "min_unit": 1,
+                "reserved": 2,
+                "step_size": 1,
+                "total": 64
+            }
+        },
+        "resource_provider_generation": 2
+    }
+    FAKE_RESOURCE_UPDATE_INVENTORIES_REQUEST = {
+        "inventories": {
+            "MEMORY_MB": {
+                "allocation_ratio": 2.0,
+                "max_unit": 16,
+                "step_size": 4,
+                "total": 128
+            },
+            "VCPU": {
+                "allocation_ratio": 10.0,
+                "reserved": 2,
+                "total": 64
+            }
+        },
+        "resource_provider_generation": 1
+    }
+    FAKE_RESOURCE_PROVIDER_USAGES = {
+        "usages": {
+            "VCPU": 2,
+            "MEMORY_MB": 1024,
+            "DISK_GB": 10
+        },
+        "resource_provider_generation": 3
+    }
+
+    def setUp(self):
+        super(TestResourceProvidersClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = resource_providers_client.ResourceProvidersClient(
+            fake_auth, 'placement', 'regionOne')
+
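+    # Each check below mocks a single placement API call; the 'to_utf'
+    # keyword serves the same purpose as bytes_body elsewhere, switching
+    # the canned response body between str and bytes.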
+    def _test_list_resource_providers(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_providers,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDERS,
+            to_utf=bytes_body,
+            status=200
+        )
+
+    def test_list_resource_providers_with_str_body(self):
+        self._test_list_resource_providers()
+
+    def test_list_resource_providers_with_bytes_body(self):
+        self._test_list_resource_providers(bytes_body=True)
+
+    def _test_show_resource_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_resource_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_show_resource_provider_with_str_body(self):
+        self._test_show_resource_provider()
+
+    def test_show_resource_provider_with_bytes_body(self):
+        self._test_show_resource_provider(bytes_body=True)
+
+    def _test_list_resource_provider_inventories(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_provider_inventories,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER_INVENTORIES,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_list_resource_provider_inventories_with_str_body(self):
+        self._test_list_resource_provider_inventories()
+
+    def test_list_resource_provider_inventories_with_bytes_body(self):
+        self._test_list_resource_provider_inventories(bytes_body=True)
+
+    def _test_update_resource_providers_inventories(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_resource_providers_inventories,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_RESOURCE_UPDATE_INVENTORIES_RESPONSE,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
+            **self.FAKE_RESOURCE_UPDATE_INVENTORIES_REQUEST
+        )
+
+    def test_update_resource_providers_inventories_with_str_body(self):
+        self._test_update_resource_providers_inventories()
+
+    def test_update_resource_providers_inventories_with_bytes_body(self):
+        self._test_update_resource_providers_inventories(bytes_body=True)
+
+    def test_delete_resource_providers_inventories(self):
+        self.check_service_client_function(
+            self.client.delete_resource_providers_inventories,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            status=204,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
+        )
+
+    def _test_list_resource_provider_aggregates(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_provider_aggregates,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER_AGGREGATES,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_list_resource_provider_aggregates_with_str_body(self):
+        self._test_list_resource_provider_aggregates()
+
+    def test_list_resource_provider_aggregates_with_bytes_body(self):
+        self._test_list_resource_provider_aggregates(bytes_body=True)
+
+    def _test_list_resource_provider_usages(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_provider_usages,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER_USAGES,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_list_resource_provider_usages_with_str_body(self):
+        self._test_list_resource_provider_usages()
+
+    def test_list_resource_provider_usages_with_bytes_body(self):
+        self._test_list_resource_provider_usages(bytes_body=True)
diff --git a/tempest/tests/lib/services/registry_fixture.py b/tempest/tests/lib/services/registry_fixture.py
index 07af68a..a368705 100644
--- a/tempest/tests/lib/services/registry_fixture.py
+++ b/tempest/tests/lib/services/registry_fixture.py
@@ -38,8 +38,7 @@
         """Initialise the registry fixture"""
         self.services = set(['compute', 'identity.v2', 'identity.v3',
                              'image.v1', 'image.v2', 'network', 'placement',
-                             'volume.v1', 'volume.v2', 'volume.v3',
-                             'object-storage'])
+                             'volume.v2', 'volume.v3', 'object-storage'])
 
     def _setUp(self):
         # Cleanup the registry
diff --git a/tempest/tests/lib/services/test_clients.py b/tempest/tests/lib/services/test_clients.py
index 43fd88f..6c79db6 100644
--- a/tempest/tests/lib/services/test_clients.py
+++ b/tempest/tests/lib/services/test_clients.py
@@ -13,10 +13,9 @@
 # the License.
 
 import types
+from unittest import mock
 
 import fixtures
-import mock
-import six
 import testtools
 
 from tempest.lib import auth
@@ -270,8 +269,7 @@
                           'module_path': 'This neither',
                           'client_names': ['SomeClient1']}]}
         msg = "(?=.*{0})(?=.*{1})".format(
-            *[x[1][0]['module_path'] for x in six.iteritems(
-                fake_service_clients)])
+            *[x[1][0]['module_path'] for x in fake_service_clients.items()])
         self.useFixture(fixtures.MockPatchObject(
             clients.ClientsRegistry(), 'get_service_clients',
             return_value=fake_service_clients))
@@ -300,8 +298,8 @@
                           'module_path': 'fake_path_2',
                           'client_names': ['SomeClient2']}]}
         msg = "(?=.*{0})(?=.*{1})".format(
-            *[x[1][0]['service_version'] for x in six.iteritems(
-                fake_service_clients)])
+            *[x[1][0]['service_version'] for x in
+                fake_service_clients.items()])
         self.useFixture(fixtures.MockPatchObject(
             clients.ClientsRegistry(), 'get_service_clients',
             return_value=fake_service_clients))
diff --git a/tempest/tests/lib/services/volume/v1/test_encryption_types_client.py b/tempest/tests/lib/services/volume/v1/test_encryption_types_client.py
deleted file mode 100644
index 585904e..0000000
--- a/tempest/tests/lib/services/volume/v1/test_encryption_types_client.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2016 NEC Corporation.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.lib.services.volume.v1 import encryption_types_client
-from tempest.tests.lib import fake_auth_provider
-from tempest.tests.lib.services import base
-
-
-class TestEncryptionTypesClient(base.BaseServiceTest):
-    FAKE_CREATE_ENCRYPTION_TYPE = {
-        "encryption": {
-            "id": "cbc36478b0bd8e67e89",
-            "name": "FakeEncryptionType",
-            "type": "fakeType",
-            "provider": "LuksEncryptor",
-            "cipher": "aes-xts-plain64",
-            "key_size": "512",
-            "control_location": "front-end"
-        }
-    }
-
-    FAKE_INFO_ENCRYPTION_TYPE = {
-        "encryption": {
-            "name": "FakeEncryptionType",
-            "type": "fakeType",
-            "description": "test_description",
-            "volume_type": "fakeType",
-            "provider": "LuksEncryptor",
-            "cipher": "aes-xts-plain64",
-            "key_size": "512",
-            "control_location": "front-end"
-        }
-    }
-
-    def setUp(self):
-        super(TestEncryptionTypesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = encryption_types_client.EncryptionTypesClient(fake_auth,
-                                                                    'volume',
-                                                                    'regionOne'
-                                                                    )
-
-    def _test_create_encryption(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.create_encryption_type,
-            'tempest.lib.common.rest_client.RestClient.post',
-            self.FAKE_CREATE_ENCRYPTION_TYPE,
-            bytes_body, volume_type_id="cbc36478b0bd8e67e89")
-
-    def _test_show_encryption_type(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_encryption_type,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_INFO_ENCRYPTION_TYPE,
-            bytes_body, volume_type_id="cbc36478b0bd8e67e89")
-
-    def test_create_encryption_type_with_str_body(self):
-        self._test_create_encryption()
-
-    def test_create_encryption_type_with_bytes_body(self):
-        self._test_create_encryption(bytes_body=True)
-
-    def test_show_encryption_type_with_str_body(self):
-        self._test_show_encryption_type()
-
-    def test_show_encryption_type_with_bytes_body(self):
-        self._test_show_encryption_type(bytes_body=True)
-
-    def test_delete_encryption_type(self):
-        self.check_service_client_function(
-            self.client.delete_encryption_type,
-            'tempest.lib.common.rest_client.RestClient.delete',
-            {},
-            volume_type_id="cbc36478b0bd8e67e89",
-            status=202)
diff --git a/tempest/tests/lib/services/volume/v1/test_quotas_client.py b/tempest/tests/lib/services/volume/v1/test_quotas_client.py
deleted file mode 100644
index f9e76af..0000000
--- a/tempest/tests/lib/services/volume/v1/test_quotas_client.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2016 NEC Corporation.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.lib.services.volume.v1 import quotas_client
-from tempest.tests.lib import fake_auth_provider
-from tempest.tests.lib.services import base
-
-
-class TestQuotasClient(base.BaseServiceTest):
-    FAKE_QUOTAS = {
-        "quota_set": {
-            "cores": 20,
-            "fixed_ips": -1,
-            "floating_ips": 10,
-            "id": "fake_tenant",
-            "injected_file_content_bytes": 10240,
-            "injected_file_path_bytes": 255,
-            "injected_files": 5,
-            "instances": 10,
-            "key_pairs": 100,
-            "metadata_items": 128,
-            "ram": 51200,
-            "security_group_rules": 20,
-            "security_groups": 10
-        }
-    }
-
-    FAKE_UPDATE_QUOTAS_REQUEST = {
-        "quota_set": {
-            "security_groups": 45
-        }
-    }
-
-    def setUp(self):
-        super(TestQuotasClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = quotas_client.QuotasClient(fake_auth,
-                                                 'volume',
-                                                 'regionOne')
-
-    def _test_show_default_quota_set(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_default_quota_set,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_QUOTAS,
-            bytes_body, tenant_id="fake_tenant")
-
-    def _test_show_quota_set(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_quota_set,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_QUOTAS,
-            bytes_body, tenant_id="fake_tenant")
-
-    def _test_update_quota_set(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.update_quota_set,
-            'tempest.lib.common.rest_client.RestClient.put',
-            self.FAKE_UPDATE_QUOTAS_REQUEST,
-            bytes_body, tenant_id="fake_tenant")
-
-    def test_show_default_quota_set_with_str_body(self):
-        self._test_show_default_quota_set()
-
-    def test_show_default_quota_set_with_bytes_body(self):
-        self._test_show_default_quota_set(bytes_body=True)
-
-    def test_show_quota_set_with_str_body(self):
-        self._test_show_quota_set()
-
-    def test_show_quota_set_with_bytes_body(self):
-        self._test_show_quota_set(bytes_body=True)
-
-    def test_update_quota_set_with_str_body(self):
-        self._test_update_quota_set()
-
-    def test_update_quota_set_with_bytes_body(self):
-        self._test_update_quota_set(bytes_body=True)
-
-    def test_delete_quota_set(self):
-        self.check_service_client_function(
-            self.client.delete_quota_set,
-            'tempest.lib.common.rest_client.RestClient.delete',
-            {},
-            tenant_id="fake_tenant")
diff --git a/tempest/tests/lib/services/volume/v1/test_snapshots_client.py b/tempest/tests/lib/services/volume/v1/test_snapshots_client.py
deleted file mode 100644
index 49191e3..0000000
--- a/tempest/tests/lib/services/volume/v1/test_snapshots_client.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2016 NEC Corporation.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.lib.services.volume.v1 import snapshots_client
-from tempest.tests.lib import fake_auth_provider
-from tempest.tests.lib.services import base
-
-
-class TestSnapshotsClient(base.BaseServiceTest):
-    FAKE_CREATE_SNAPSHOT = {
-        "snapshot": {
-            "display_name": "snap-001",
-            "display_description": "Daily backup",
-            "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-            "force": True
-        }
-    }
-
-    FAKE_UPDATE_SNAPSHOT_REQUEST = {
-        "metadata": {
-            "key": "v1"
-        }
-    }
-
-    FAKE_INFO_SNAPSHOT = {
-        "snapshot": {
-            "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
-            "display_name": "snap-001",
-            "display_description": "Daily backup",
-            "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-            "status": "available",
-            "size": 30,
-            "created_at": "2012-02-29T03:50:07Z"
-        }
-    }
-
-    FAKE_LIST_SNAPSHOTS = {
-        "snapshots": [
-            {
-                "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
-                "display_name": "snap-001",
-                "display_description": "Daily backup",
-                "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-                "status": "available",
-                "size": 30,
-                "created_at": "2012-02-29T03:50:07Z",
-                "metadata": {
-                    "contents": "junk"
-                }
-            },
-            {
-                "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
-                "display_name": "snap-002",
-                "display_description": "Weekly backup",
-                "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358",
-                "status": "available",
-                "size": 25,
-                "created_at": "2012-03-19T01:52:47Z",
-                "metadata": {}
-            }
-        ]
-    }
-
-    def setUp(self):
-        super(TestSnapshotsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = snapshots_client.SnapshotsClient(fake_auth,
-                                                       'volume',
-                                                       'regionOne')
-
-    def _test_create_snapshot(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.create_snapshot,
-            'tempest.lib.common.rest_client.RestClient.post',
-            self.FAKE_CREATE_SNAPSHOT,
-            bytes_body)
-
-    def _test_show_snapshot(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_snapshot,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_INFO_SNAPSHOT,
-            bytes_body,
-            snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
-
-    def _test_list_snapshots(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_snapshots,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_LIST_SNAPSHOTS,
-            bytes_body,
-            detail=True)
-
-    def _test_create_snapshot_metadata(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.create_snapshot_metadata,
-            'tempest.lib.common.rest_client.RestClient.post',
-            self.FAKE_INFO_SNAPSHOT,
-            bytes_body,
-            snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
-            metadata={"key": "v1"})
-
-    def _test_update_snapshot(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.update_snapshot,
-            'tempest.lib.common.rest_client.RestClient.put',
-            self.FAKE_UPDATE_SNAPSHOT_REQUEST,
-            bytes_body,
-            snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
-
-    def _test_show_snapshot_metadata(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_snapshot_metadata,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_UPDATE_SNAPSHOT_REQUEST,
-            bytes_body,
-            snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
-
-    def _test_update_snapshot_metadata(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.update_snapshot_metadata,
-            'tempest.lib.common.rest_client.RestClient.put',
-            self.FAKE_UPDATE_SNAPSHOT_REQUEST,
-            bytes_body, snapshot_id="cbc36478b0bd8e67e89")
-
-    def _test_update_snapshot_metadata_item(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.update_snapshot_metadata_item,
-            'tempest.lib.common.rest_client.RestClient.put',
-            self.FAKE_INFO_SNAPSHOT,
-            bytes_body, volume_type_id="cbc36478b0bd8e67e89")
-
-    def test_create_snapshot_with_str_body(self):
-        self._test_create_snapshot()
-
-    def test_create_snapshot_with_bytes_body(self):
-        self._test_create_snapshot(bytes_body=True)
-
-    def test_show_snapshot_with_str_body(self):
-        self._test_show_snapshot()
-
-    def test_show_snapshot_with_bytes_body(self):
-        self._test_show_snapshot(bytes_body=True)
-
-    def test_list_snapshots_with_str_body(self):
-        self._test_list_snapshots()
-
-    def test_list_snapshots_with_bytes_body(self):
-        self._test_list_snapshots(bytes_body=True)
-
-    def test_create_snapshot_metadata_with_str_body(self):
-        self._test_create_snapshot_metadata()
-
-    def test_create_snapshot_metadata_with_bytes_body(self):
-        self._test_create_snapshot_metadata(bytes_body=True)
-
-    def test_update_snapshot_with_str_body(self):
-        self._test_update_snapshot()
-
-    def test_update_snapshot_with_bytes_body(self):
-        self._test_update_snapshot(bytes_body=True)
-
-    def test_show_snapshot_metadata_with_str_body(self):
-        self._test_show_snapshot_metadata()
-
-    def test_show_snapshot_metadata_with_bytes_body(self):
-        self._test_show_snapshot_metadata(bytes_body=True)
-
-    def test_update_snapshot_metadata_with_str_body(self):
-        self._test_update_snapshot_metadata()
-
-    def test_update_snapshot_metadata_with_bytes_body(self):
-        self._test_update_snapshot_metadata(bytes_body=True)
-
-    def test_force_delete_snapshot(self):
-        self.check_service_client_function(
-            self.client.force_delete_snapshot,
-            'tempest.lib.common.rest_client.RestClient.post',
-            {},
-            snapshot_id="521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-            status=202)
-
-    def test_delete_snapshot(self):
-        self.check_service_client_function(
-            self.client.delete_snapshot,
-            'tempest.lib.common.rest_client.RestClient.delete',
-            {},
-            snapshot_id="521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-            status=202)
diff --git a/tempest/tests/lib/services/volume/v3/test_attachments_client.py b/tempest/tests/lib/services/volume/v3/test_attachments_client.py
new file mode 100644
index 0000000..52c94e5
--- /dev/null
+++ b/tempest/tests/lib/services/volume/v3/test_attachments_client.py
@@ -0,0 +1,46 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.volume.v3 import attachments_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
+
+class TestAttachmentsClient(base.BaseServiceTest):
+
+    FAKE_ATTACHMENT_INFO = {
+        "attachment": {
+            "status": "attaching",
+            "detached_at": "2015-09-16T09:28:52.000000",
+            "connection_info": {},
+            "attached_at": "2015-09-16T09:28:52.000000",
+            "attach_mode": "ro",
+            "instance": uuids.instance_id,
+            "volume_id": uuids.volume_id,
+            "id": uuids.id,
+        }
+    }
+
+    def setUp(self):
+        super(TestAttachmentsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = attachments_client.AttachmentsClient(fake_auth,
+                                                           'volume',
+                                                           'regionOne')
+
+    def test_show_attachment(self):
+        self.check_service_client_function(
+            self.client.show_attachment,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ATTACHMENT_INFO, attachment_id=uuids.id)
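The new attachments test builds its fake response with oslo.utils' uuidsentinel fixture instead of hard-coded IDs. A minimal, self-contained sketch of how that fixture behaves (illustrative only, not part of the change):

    from oslo_utils.fixture import uuidsentinel as uuids

    # Each attribute access generates a UUID string on first use and returns
    # the same value afterwards, so the fake response dict and the assertion
    # can refer to the same ID purely by name.
    assert uuids.volume_id == uuids.volume_id
    assert uuids.volume_id != uuids.instance_id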
diff --git a/tempest/tests/lib/services/volume/v3/test_backups_client.py b/tempest/tests/lib/services/volume/v3/test_backups_client.py
index 97e1132..ca7918a 100644
--- a/tempest/tests/lib/services/volume/v3/test_backups_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_backups_client.py
@@ -45,6 +45,8 @@
                 "availability_zone": "az1",
                 "container": "volumebackups",
                 "created_at": "2013-04-02T10:35:27.000000",
+                "updated_at": "2013-04-02T10:39:27.000000",
+                "data_timestamp": "2013-04-02T10:35:27.000000",
                 "description": None,
                 "fail_reason": None,
                 "id": "2ef47aee-8844-490c-804d-2a8efe561c65",
@@ -64,7 +66,6 @@
                 "user_id": "515ba0dd59f84f25a6a084a45d8d93b2",
                 "size": 1,
                 "status": "available",
-                "updated_at": "2013-04-02T10:35:27.000000",
                 "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6",
                 "is_incremental": True,
                 "has_dependent_backups": False
diff --git a/tempest/tests/lib/services/volume/v3/test_encryption_types_client.py b/tempest/tests/lib/services/volume/v3/test_encryption_types_client.py
index 70a3ee5..7218224 100644
--- a/tempest/tests/lib/services/volume/v3/test_encryption_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_encryption_types_client.py
@@ -20,11 +20,11 @@
 class TestEncryptionTypesClient(base.BaseServiceTest):
     FAKE_CREATE_ENCRYPTION_TYPE = {
         "encryption": {
-            "volume_type_id": "cbc36478b0bd8e67e89",
+            "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577",
             "control_location": "front-end",
             "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74",
             "key_size": 128,
-            "provider": "LuksEncryptor",
+            "provider": "luks",
             "cipher": "aes-xts-plain64"
         }
     }
diff --git a/tempest/tests/lib/services/volume/v3/test_group_types_client.py b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
index 8b853d7..33c7737 100644
--- a/tempest/tests/lib/services/volume/v3/test_group_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_group_types_client.py
@@ -23,8 +23,8 @@
     FAKE_CREATE_GROUP_TYPE = {
         "group_type": {
             "id": "6685584b-1eac-4da6-b5c3-555430cf68ff",
-            "name": "grp-type-001",
-            "description": "group type 001",
+            "name": "group-type-001",
+            "description": "Test group type 1",
             "is_public": True,
             "group_specs": {
                 "consistent_group_snapshot_enabled": "<is> False"
diff --git a/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py b/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
index 84c7589..7606a52 100644
--- a/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_scheduler_stats_client.py
@@ -25,12 +25,14 @@
                 "name": "pool1",
                 "capabilities": {
                     "updated": "2014-10-28T00:00:00-00:00",
-                    "total_capacity": 1024,
-                    "free_capacity": 100,
+                    "total_capacity_gb": 1024,
+                    "free_capacity_gb": 100,
                     "volume_backend_name": "pool1",
                     "reserved_percentage": 0,
                     "driver_version": "1.0.0",
+                    "timestamp": "2014-10-28T00:00:00-00:00",
                     "storage_protocol": "iSCSI",
+                    "vendor_name": "vendor",
                     "QoS_support": False
                 }
             },
@@ -38,12 +40,14 @@
                 "name": "pool2",
                 "capabilities": {
                     "updated": "2014-10-28T00:00:00-00:00",
-                    "total_capacity": 512,
-                    "free_capacity": 200,
+                    "total_capacity_gb": 512,
+                    "free_capacity_gb": 200,
                     "volume_backend_name": "pool2",
                     "reserved_percentage": 0,
                     "driver_version": "1.0.2",
+                    "timestamp": "2014-10-28T00:00:00-00:00",
                     "storage_protocol": "iSER",
+                    "vendor_name": "vendor",
                     "QoS_support": True
                 }
             }
diff --git a/tempest/tests/lib/services/volume/v3/test_services_client.py b/tempest/tests/lib/services/volume/v3/test_services_client.py
index f65228f..c807bc2 100644
--- a/tempest/tests/lib/services/volume/v3/test_services_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_services_client.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 import copy
+from unittest import mock
 
-import mock
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.services.volume.v3 import services_client
diff --git a/tempest/tests/lib/services/volume/v3/test_snapshot_manage_client.py b/tempest/tests/lib/services/volume/v3/test_snapshot_manage_client.py
index 1b88020..8309f7a 100644
--- a/tempest/tests/lib/services/volume/v3/test_snapshot_manage_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_snapshot_manage_client.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
 
 from oslo_serialization import jsonutils as json
 
diff --git a/tempest/tests/lib/services/volume/v3/test_transfers_client.py b/tempest/tests/lib/services/volume/v3/test_transfers_client.py
index d631fe7..3626184 100644
--- a/tempest/tests/lib/services/volume/v3/test_transfers_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_transfers_client.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 import copy
+from unittest import mock
 
-import mock
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.services.volume.v3 import transfers_client
@@ -52,6 +52,7 @@
         self.client = transfers_client.TransfersClient(fake_auth,
                                                        'volume',
                                                        'regionOne')
+        self.resource_path = 'os-volume-transfer'
 
     def _test_create_volume_transfer(self, bytes_body=False):
         resp_body = copy.deepcopy(self.FAKE_VOLUME_TRANSFER_INFO)
@@ -72,7 +73,7 @@
                 resp_body,
                 to_utf=bytes_body,
                 status=202,
-                mock_args=['os-volume-transfer', payload],
+                mock_args=[self.resource_path, payload],
                 **kwargs)
 
     def _test_accept_volume_transfer(self, bytes_body=False):
@@ -93,8 +94,9 @@
                 resp_body,
                 to_utf=bytes_body,
                 status=202,
-                mock_args=['os-volume-transfer/%s/accept' %
-                           self.FAKE_VOLUME_TRANSFER_ID, payload],
+                mock_args=['%s/%s/accept' % (self.resource_path,
+                                             self.FAKE_VOLUME_TRANSFER_ID),
+                           payload],
                 transfer_id=self.FAKE_VOLUME_TRANSFER_ID,
                 **kwargs)
 
@@ -156,3 +158,14 @@
             {},
             status=202,
             transfer_id="0e89cdd1-6249-421b-96d8-25fac0623d42")
+
+
+class TestTransfersV355Client(TestTransfersClient):
+
+    def setUp(self):
+        super(TestTransfersV355Client, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = transfers_client.TransfersV355Client(fake_auth,
+                                                           'volume',
+                                                           'regionOne')
+        self.resource_path = 'volume-transfers'
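TestTransfersV355Client reruns every inherited test against the volume-transfers path introduced with microversion 3.55, because the base tests now build their expected mock_args from self.resource_path. A simplified sketch of the pattern (illustrative names, not the tempest code itself):

    class BaseTransferTests:
        resource_path = 'os-volume-transfer'

        def expected_accept_url(self, transfer_id):
            # The assertion target tracks whichever path the subclass sets.
            return '%s/%s/accept' % (self.resource_path, transfer_id)

    class TransferTestsV355(BaseTransferTests):
        resource_path = 'volume-transfers'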
diff --git a/tempest/tests/lib/services/volume/v3/test_types_client.py b/tempest/tests/lib/services/volume/v3/test_types_client.py
index 336aa32..19d6591 100644
--- a/tempest/tests/lib/services/volume/v3/test_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_types_client.py
@@ -121,6 +121,13 @@
             to_utf=bytes_body,
             volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff")
 
+    def _test_show_default_volume_type(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_default_volume_type,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_DEFAULT_VOLUME_TYPE_INFO,
+            to_utf=bytes_body)
+
     def _test_create_volume_type(self, bytes_body=False):
         self.check_service_client_function(
             self.client.create_volume_type,
@@ -224,6 +231,12 @@
     def test_show_volume_type_with_bytes_body(self):
         self._test_show_volume_type(bytes_body=True)
 
+    def test_show_default_volume_type_with_str_body(self):
+        self._test_show_default_volume_type()
+
+    def test_show_default_volume_type_with_bytes_body(self):
+        self._test_show_default_volume_type(bytes_body=True)
+
     def test_create_volume_type_str_body(self):
         self._test_create_volume_type()
 
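The two new tests exercise a show_default_volume_type call. A minimal sketch of what such a method typically looks like, assuming tempest's usual get/expected_success service-client pattern (the authoritative implementation lives in tempest/lib/services/volume/v3/types_client.py and may differ in detail):

    from oslo_serialization import jsonutils as json
    from tempest.lib.common import rest_client

    # Method of the volume v3 TypesClient, shown out of its class for brevity.
    def show_default_volume_type(self):
        resp, body = self.get("types/default")
        body = json.loads(body)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)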
diff --git a/tempest/tests/lib/services/volume/v3/test_versions_client.py b/tempest/tests/lib/services/volume/v3/test_versions_client.py
index 575cae3..862fb9b 100644
--- a/tempest/tests/lib/services/volume/v3/test_versions_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_versions_client.py
@@ -22,21 +22,6 @@
     FAKE_VERSIONS_INFO = {
         "versions": [
             {
-                "status": "DEPRECATED", "updated": "2016-05-02T20:25:19Z",
-                "links": [
-                    {"href": "http://docs.openstack.org/", "type": "text/html",
-                     "rel": "describedby"},
-                    {"href": "https://10.30.197.39:8776/v1/", "rel": "self"}
-                ],
-                "min_version": "",
-                "version": "",
-                "media-types": [
-                    {"base": "application/json",
-                     "type": "application/vnd.openstack.volume+json;version=1"}
-                ],
-                "id": "v1.0"
-            },
-            {
                 "status": "DEPRECATED", "updated": "2017-02-25T12:00:00Z",
                 "links": [
                     {"href": "http://docs.openstack.org/", "type": "text/html",
@@ -134,8 +119,6 @@
         self._test_show_version('v3', bytes_body=True)
 
     def test_get_base_version_url_app_name(self):
-        self._test_get_base_version_url('https://bar.org/volume/v1/123',
-                                        'https://bar.org/volume/')
         self._test_get_base_version_url('https://bar.org/volume/v2/123',
                                         'https://bar.org/volume/')
         self._test_get_base_version_url('https://bar.org/volume/v3/123',
diff --git a/tempest/tests/lib/services/volume/v3/test_volume_manage_client.py b/tempest/tests/lib/services/volume/v3/test_volume_manage_client.py
index 902f027..3d47caf 100644
--- a/tempest/tests/lib/services/volume/v3/test_volume_manage_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_volume_manage_client.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
 
 from oslo_serialization import jsonutils as json
 
@@ -54,7 +54,6 @@
                 }
             ],
             "availability_zone": "nova",
-            "os-vol-host-attr:host": "controller1@rbd#rbd",
             "encrypted": False,
             "updated_at": None,
             "replication_status": None,
@@ -62,15 +61,12 @@
             "id": "c07cd4a4-b52b-4511-a176-fbaa2011a227",
             "size": 0,
             "user_id": "142d8663efce464c89811c63e45bd82e",
-            "os-vol-tenant-attr:tenant_id": "f21a9c86d7114bf99c711f4874d80474",
-            "os-vol-mig-status-attr:migstat": None,
             "metadata": {},
             "status": "creating",
             "description": "volume-manage-description",
             "multiattach": False,
             "source_volid": None,
             "consistencygroup_id": None,
-            "os-vol-mig-status-attr:name_id": None,
             "name": "volume-managed",
             "bootable": "false",
             "created_at": "2017-07-11T09:14:01.000000",
diff --git a/tempest/tests/lib/services/volume/v3/test_volumes_client.py b/tempest/tests/lib/services/volume/v3/test_volumes_client.py
index 56c1a35..6bd75d9 100644
--- a/tempest/tests/lib/services/volume/v3/test_volumes_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_volumes_client.py
@@ -26,10 +26,6 @@
         "volume-summary": {
             "total_size": 4,
             "total_count": 4,
-            "metadata": {
-                "key1": ["value1", "value2"],
-                "key2": ["value2"]
-            }
         }
     }
 
diff --git a/tempest/tests/lib/test_auth.py b/tempest/tests/lib/test_auth.py
index c3a792f..3edb122 100644
--- a/tempest/tests/lib/test_auth.py
+++ b/tempest/tests/lib/test_auth.py
@@ -786,6 +786,19 @@
                 self.assertIn(attr, auth_params.keys())
                 self.assertEqual(getattr(all_creds, attr), auth_params[attr])
 
+    def test_auth_parameters_with_system_scope(self):
+        all_creds = fake_credentials.FakeKeystoneV3AllCredentials()
+        self.auth_provider.credentials = all_creds
+        self.auth_provider.scope = 'system'
+        auth_params = self.auth_provider._auth_params()
+        self.assertNotIn('scope', auth_params.keys())
+        for attr in all_creds.get_init_attributes():
+            if attr.startswith('project_') or attr.startswith('domain_'):
+                self.assertNotIn(attr, auth_params.keys())
+            else:
+                self.assertIn(attr, auth_params.keys())
+                self.assertEqual(getattr(all_creds, attr), auth_params[attr])
+
 
 class TestKeystoneV3Credentials(base.TestCase):
     def testSetAttrUserDomain(self):
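The new test verifies that a system-scoped provider drops every project_* and domain_* attribute from the auth parameters. For context, those parameters ultimately feed a Keystone v3 token request whose scope block is system-wide rather than project or domain; roughly (illustrative values, the exact body is assembled by the auth provider):

    token_request = {
        "auth": {
            "identity": {
                "methods": ["password"],
                "password": {
                    "user": {
                        "name": "admin",
                        "domain": {"name": "Default"},
                        "password": "secret",
                    }
                },
            },
            # System scope replaces the usual project/domain scope block.
            "scope": {"system": {"all": True}},
        }
    }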
diff --git a/tempest/tests/lib/test_decorators.py b/tempest/tests/lib/test_decorators.py
index 9c6cac7..fc93f76 100644
--- a/tempest/tests/lib/test_decorators.py
+++ b/tempest/tests/lib/test_decorators.py
@@ -14,9 +14,8 @@
 #    under the License.
 
 import abc
+from unittest import mock
 
-import mock
-import six
 import testtools
 
 from tempest.lib import base as test
@@ -69,8 +68,7 @@
                                condition=True)
 
 
-@six.add_metaclass(abc.ABCMeta)
-class BaseSkipDecoratorTests(object):
+class BaseSkipDecoratorTests(object, metaclass=abc.ABCMeta):
 
     @abc.abstractmethod
     def _test_skip_helper(self, raise_exception=True, expected_to_skip=True,
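The six.add_metaclass decorator is replaced here by Python 3's native metaclass keyword argument; the two spellings are equivalent. A minimal illustration:

    import abc

    # Previously (Python 2/3 compatible):
    #   @six.add_metaclass(abc.ABCMeta)
    #   class Base(object): ...
    #
    # Python 3 only; listing object explicitly is redundant but harmless.
    class Base(object, metaclass=abc.ABCMeta):

        @abc.abstractmethod
        def _helper(self):
            """Subclasses must implement this."""

    class Concrete(Base):
        def _helper(self):
            return 'ok'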
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index c849231..886d99c 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -12,11 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from io import StringIO
 import socket
+from unittest import mock
 
-import mock
-import six
-from six import StringIO
 import testtools
 
 from tempest.lib.common import ssh
@@ -30,7 +29,7 @@
     SELECT_POLLIN = 1
 
     @mock.patch('paramiko.RSAKey.from_private_key')
-    @mock.patch('six.StringIO')
+    @mock.patch('io.StringIO')
     def test_pkey_calls_paramiko_RSAKey(self, cs_mock, rsa_mock):
         cs_mock.return_value = mock.sentinel.csio
         pkey = 'mykey'
@@ -240,7 +239,7 @@
 
         return chan_mock, poll_mock, select_mock, client_mock
 
-    _utf8_string = six.unichr(1071)
+    _utf8_string = chr(1071)
     _utf8_bytes = _utf8_string.encode("utf-8")
 
     @mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
@@ -274,7 +273,7 @@
         client = ssh.Client('localhost', 'root', timeout=2)
         exc = self.assertRaises(exceptions.SSHExecCommandFailed,
                                 client.exec_command, "test")
-        self.assertIn('R' + self._utf8_string, six.text_type(exc))
+        self.assertIn('R' + self._utf8_string, str(exc))
 
     def test_exec_command_no_select(self):
         gsc_mock = self.patch('tempest.lib.common.ssh.Client.'
diff --git a/tempest/tests/test_base_test.py b/tempest/tests/test_base_test.py
index 2b5a947..88c28bf 100644
--- a/tempest/tests/test_base_test.py
+++ b/tempest/tests/test_base_test.py
@@ -12,7 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import mock
+from unittest import mock
+
 from oslo_config import cfg
 
 from tempest import clients
@@ -108,7 +109,7 @@
 
         test.BaseTestCase.get_tenant_network(credentials_type=creds)
 
-        mock_gcm.assert_called_once_with(roles=['role1'])
+        mock_gcm.assert_called_once_with(roles=['role1'], scope='project')
         mock_gprov.assert_called_once_with()
         mock_gtn.assert_called_once_with(mock_prov, net_client,
                                          self.fixed_network_name)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 6018441..1889420 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -19,7 +19,6 @@
 from tempest.common import utils
 from tempest import config
 from tempest import exceptions
-from tempest.lib.common.utils import data_utils
 from tempest import test
 from tempest.tests import base
 from tempest.tests import fake_config
@@ -33,47 +32,6 @@
                          fake_config.FakePrivate)
 
 
-# NOTE: The test module is for tempest.test.idempotent_id.
-# After all projects switch to use decorators.idempotent_id,
-# we can remove tempest.test.idempotent_id as well as this
-# test module
-class TestIdempotentIdDecorator(BaseDecoratorsTest):
-
-    def _test_helper(self, _id, **decorator_args):
-        @test.idempotent_id(_id)
-        def foo():
-            """Docstring"""
-            pass
-
-        return foo
-
-    def _test_helper_without_doc(self, _id, **decorator_args):
-        @test.idempotent_id(_id)
-        def foo():
-            pass
-
-        return foo
-
-    def test_positive(self):
-        _id = data_utils.rand_uuid()
-        foo = self._test_helper(_id)
-        self.assertIn('id-%s' % _id, getattr(foo, '__testtools_attrs'))
-        self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
-
-    def test_positive_without_doc(self):
-        _id = data_utils.rand_uuid()
-        foo = self._test_helper_without_doc(_id)
-        self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
-
-    def test_idempotent_id_not_str(self):
-        _id = 42
-        self.assertRaises(TypeError, self._test_helper, _id)
-
-    def test_idempotent_id_not_valid_uuid(self):
-        _id = '42'
-        self.assertRaises(ValueError, self._test_helper, _id)
-
-
 class TestServicesDecorator(BaseDecoratorsTest):
     def _test_services_helper(self, *decorator_args):
         class TestFoo(test.BaseTestCase):
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 83c1abb..7c31185 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -101,17 +101,6 @@
             'def test_fake:', './tempest/scenario/orchestration/test_fake.py',
             "\n"))
 
-    def test_no_vi_headers(self):
-        # NOTE(mtreinish)  The lines parameter is used only for finding the
-        # line location in the file. So these tests just pass a list of an
-        # arbitrary length to use for verifying the check function.
-        self.assertTrue(checks.no_vi_headers(
-            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 1, range(250)))
-        self.assertTrue(checks.no_vi_headers(
-            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 249, range(250)))
-        self.assertFalse(checks.no_vi_headers(
-            '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 149, range(250)))
-
     def test_service_tags_not_in_module_path(self):
         self.assertTrue(checks.service_tags_not_in_module_path(
             "@utils.services('compute')",
diff --git a/tempest/tests/test_imports.py b/tempest/tests/test_imports.py
index 6f1cfca..ad7bebb 100644
--- a/tempest/tests/test_imports.py
+++ b/tempest/tests/test_imports.py
@@ -12,7 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import mock
+from unittest import mock
 
 from tempest.tests import base
 
diff --git a/tempest/tests/test_list_tests.py b/tempest/tests/test_list_tests.py
index 1cc9c9a..fe44ef6 100644
--- a/tempest/tests/test_list_tests.py
+++ b/tempest/tests/test_list_tests.py
@@ -16,8 +16,6 @@
 import re
 import subprocess
 
-import six
-
 from tempest.tests import base
 
 
@@ -32,7 +30,7 @@
         self.assertEqual(0, p.returncode,
                          "test discovery failed, one or more files cause an "
                          "error on import %s" % ids)
-        ids = six.text_type(ids).split('\n')
+        ids = str(ids).split('\n')
         for test_id in ids:
             if re.match(r'(\w+\.){3}\w+', test_id):
                 if not test_id.startswith('tempest.'):
diff --git a/tempest/tests/test_microversions.py b/tempest/tests/test_microversions.py
index ee6db71..835f51c 100644
--- a/tempest/tests/test_microversions.py
+++ b/tempest/tests/test_microversions.py
@@ -13,7 +13,6 @@
 #    under the License.
 
 from oslo_config import cfg
-import six
 import testtools
 
 from tempest.api.compute import base as compute_base
@@ -75,7 +74,7 @@
                 self.assertRaises(testtools.TestCase.skipException,
                                   test_class.skip_checks)
         except testtools.TestCase.skipException as e:
-            raise testtools.TestCase.failureException(six.text_type(e))
+            raise testtools.TestCase.failureException(str(e))
 
     def test_config_version_none_none(self):
         expected_pass_tests = [VersionTestNoneTolatest, VersionTestNoneTo2_2]
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
index 49fd010..9aeedb3 100644
--- a/tempest/tests/test_test.py
+++ b/tempest/tests/test_test.py
@@ -15,8 +15,8 @@
 
 import os
 import sys
+from unittest import mock
 
-import mock
 from oslo_config import cfg
 import testtools
 
@@ -453,6 +453,130 @@
             expected_creds[1][1:],
             mock_get_client_manager.mock_calls[1][2]['roles'])
 
+    def test_setup_credentials_with_role_and_system_scope(self):
+        expected_creds = [['system_my_role', 'role1', 'role2']]
+
+        class SystemRoleCredentials(self.parent_test):
+            credentials = expected_creds
+
+        expected_clients = 'clients'
+        with mock.patch.object(
+                SystemRoleCredentials,
+                'get_client_manager') as mock_get_client_manager:
+            mock_get_client_manager.return_value = expected_clients
+            sys_creds = SystemRoleCredentials()
+            sys_creds.setup_credentials()
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role)
+        self.assertEqual(1, mock_get_client_manager.call_count)
+        self.assertEqual(
+            expected_creds[0][1:],
+            mock_get_client_manager.mock_calls[0][2]['roles'])
+        self.assertEqual(
+            'system',
+            mock_get_client_manager.mock_calls[0][2]['scope'])
+
+    def test_setup_credentials_with_multiple_role_and_system_scope(self):
+        expected_creds = [['system_my_role', 'role1', 'role2'],
+                          ['system_my_role2', 'role1', 'role2'],
+                          ['system_my_role3', 'role3']]
+
+        class SystemRoleCredentials(self.parent_test):
+            credentials = expected_creds
+
+        expected_clients = 'clients'
+        with mock.patch.object(
+                SystemRoleCredentials,
+                'get_client_manager') as mock_get_client_manager:
+            mock_get_client_manager.return_value = expected_clients
+            sys_creds = SystemRoleCredentials()
+            sys_creds.setup_credentials()
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role2'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role2)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role2'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role2)
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role3'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role3)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role3'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role3)
+        self.assertEqual(3, mock_get_client_manager.call_count)
+        self.assertEqual(
+            expected_creds[0][1:],
+            mock_get_client_manager.mock_calls[0][2]['roles'])
+        self.assertEqual(
+            'system', mock_get_client_manager.mock_calls[0][2]['scope'])
+        self.assertEqual(
+            expected_creds[1][1:],
+            mock_get_client_manager.mock_calls[1][2]['roles'])
+        self.assertEqual(
+            'system', mock_get_client_manager.mock_calls[1][2]['scope'])
+        self.assertEqual(
+            expected_creds[2][1:],
+            mock_get_client_manager.mock_calls[2][2]['roles'])
+        self.assertEqual(
+            'system', mock_get_client_manager.mock_calls[2][2]['scope'])
+
+    def test_setup_credentials_with_role_and_multiple_scope(self):
+        expected_creds = [['my_role', 'role1', 'role2'],
+                          ['project_my_role', 'role1', 'role2'],
+                          ['domain_my_role', 'role1', 'role2'],
+                          ['system_my_role', 'role1', 'role2']]
+
+        class SystemRoleCredentials(self.parent_test):
+            credentials = expected_creds
+
+        expected_clients = 'clients'
+        with mock.patch.object(
+                SystemRoleCredentials,
+                'get_client_manager') as mock_get_client_manager:
+            mock_get_client_manager.return_value = expected_clients
+            sys_creds = SystemRoleCredentials()
+            sys_creds.setup_credentials()
+        self.assertTrue(hasattr(sys_creds, 'os_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_project_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_project_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_project_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_project_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_domain_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_domain_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_domain_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_domain_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role)
+
+        self.assertEqual(4, mock_get_client_manager.call_count)
+        self.assertEqual(
+            expected_creds[0][1:],
+            mock_get_client_manager.mock_calls[0][2]['roles'])
+        self.assertEqual(
+            'project', mock_get_client_manager.mock_calls[0][2]['scope'])
+        self.assertEqual(
+            expected_creds[1][1:],
+            mock_get_client_manager.mock_calls[1][2]['roles'])
+        self.assertEqual(
+            'project', mock_get_client_manager.mock_calls[1][2]['scope'])
+        self.assertEqual(
+            expected_creds[2][1:],
+            mock_get_client_manager.mock_calls[2][2]['roles'])
+        self.assertEqual(
+            'domain', mock_get_client_manager.mock_calls[2][2]['scope'])
+        self.assertEqual(
+            expected_creds[3][1:],
+            mock_get_client_manager.mock_calls[3][2]['roles'])
+        self.assertEqual(
+            'system', mock_get_client_manager.mock_calls[3][2]['scope'])
+
     def test_setup_class_overwritten(self):
 
         class OverridesSetup(self.parent_test):
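The new credential-scope tests rely on a naming convention in the credentials list: the first element is the credential name, whose prefix selects the scope passed to get_client_manager, and the remaining elements are the requested roles. Summarised from the assertions above:

    credentials = [
        ['my_role', 'role1', 'role2'],          # -> scope='project' (default)
        ['project_my_role', 'role1', 'role2'],  # -> scope='project'
        ['domain_my_role', 'role1', 'role2'],   # -> scope='domain'
        ['system_my_role', 'role1', 'role2'],   # -> scope='system'
    ]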
diff --git a/test-requirements.txt b/test-requirements.txt
index 196387c..17fa9f1 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,8 +1,8 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-hacking>=1.1.0,<1.2.0 # Apache-2.0
-mock>=2.0.0 # BSD
+hacking>=3.0.1,<3.1.0;python_version>='3.5' # Apache-2.0
 coverage!=4.4,>=4.0 # Apache-2.0
 oslotest>=3.2.0 # Apache-2.0
+pycodestyle>=2.0.0,<2.6.0 # MIT
 flake8-import-order==0.11 # LGPLv3
diff --git a/tools/check_logs.py b/tools/check_logs.py
index de7e41d..8ab3af2 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -17,12 +17,12 @@
 
 import argparse
 import gzip
+import io
 import os
 import re
 import sys
+import urllib.request as urlreq
 
-import six
-import six.moves.urllib.request as urlreq
 import yaml
 
 # DEVSTACK_GATE_GRENADE is either unset if grenade is not running
@@ -56,39 +56,39 @@
     's-proxy'])
 
 
-def process_files(file_specs, url_specs, whitelists):
+def process_files(file_specs, url_specs, allow_lists):
     regexp = re.compile(r"^.* (ERROR|CRITICAL|TRACE) .*\[.*\-.*\]")
     logs_with_errors = []
     for (name, filename) in file_specs:
-        whitelist = whitelists.get(name, [])
+        allow_list = allow_lists.get(name, [])
         with open(filename) as content:
-            if scan_content(content, regexp, whitelist):
+            if scan_content(content, regexp, allow_list):
                 logs_with_errors.append(name)
     for (name, url) in url_specs:
-        whitelist = whitelists.get(name, [])
+        allow_list = allow_lists.get(name, [])
         req = urlreq.Request(url)
         req.add_header('Accept-Encoding', 'gzip')
         page = urlreq.urlopen(req)
-        buf = six.StringIO(page.read())
+        buf = io.StringIO(page.read())
         f = gzip.GzipFile(fileobj=buf)
-        if scan_content(f.read().splitlines(), regexp, whitelist):
+        if scan_content(f.read().splitlines(), regexp, allow_list):
             logs_with_errors.append(name)
     return logs_with_errors
 
 
-def scan_content(content, regexp, whitelist):
+def scan_content(content, regexp, allow_list):
     had_errors = False
     for line in content:
         if not line.startswith("Stderr:") and regexp.match(line):
-            whitelisted = False
-            for w in whitelist:
+            allowed = False
+            for w in allow_list:
                 pat = ".*%s.*%s.*" % (w['module'].replace('.', '\\.'),
                                       w['message'])
                 if re.match(pat, line):
-                    whitelisted = True
+                    allowed = True
                     break
-            if not whitelisted or dump_all_errors:
-                if not whitelisted:
+            if not allowed or dump_all_errors:
+                if not allowed:
                     had_errors = True
     return had_errors
 
@@ -105,9 +105,9 @@
         print("Must provide exactly one of -d or -u")
         return 1
     print("Checking logs...")
-    WHITELIST_FILE = os.path.join(
+    ALLOW_LIST_FILE = os.path.join(
         os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
-        "etc", "whitelist.yaml")
+        "etc", "allow-list.yaml")
 
     file_matcher = re.compile(r".*screen-([\w-]+)\.log")
     files = []
@@ -132,17 +132,17 @@
         if m:
             urls_to_process.append((m.group(1), u))
 
-    whitelists = {}
-    with open(WHITELIST_FILE) as stream:
+    allow_lists = {}
+    with open(ALLOW_LIST_FILE) as stream:
         loaded = yaml.safe_load(stream)
         if loaded:
-            for (name, l) in six.iteritems(loaded):
+            for (name, l) in loaded.items():
                 for w in l:
                     assert 'module' in w, 'no module in %s' % name
                     assert 'message' in w, 'no message in %s' % name
-            whitelists = loaded
+            allow_lists = loaded
     logs_with_errors = process_files(files_to_process, urls_to_process,
-                                     whitelists)
+                                     allow_lists)
 
     failed = False
     if logs_with_errors:
@@ -164,14 +164,14 @@
 
 
 usage = """
-Find non-white-listed log errors in log files from a devstack-gate run.
+Find non-allow-listed log errors in log files from a devstack-gate run.
 Log files will be searched for ERROR or CRITICAL messages. If any
-error messages do not match any of the whitelist entries contained in
-etc/whitelist.yaml, those messages will be printed to the console and
+error messages do not match any of the allow-list entries contained in
+etc/allow-list.yaml, those messages will be printed to the console and
 failure will be returned. A file directory containing logs or a url to the
 log files of an OpenStack gate job can be provided.
 
-The whitelist yaml looks like:
+The allow-list yaml looks like:
 
 log-name:
     - module: "a.b.c"
@@ -179,7 +179,7 @@
     - module: "a.b.c"
       message: "regexp"
 
-repeated for each log file with a whitelist.
+repeated for each log file with an allow-list.
 """
 
 parser = argparse.ArgumentParser(description=usage)
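scan_content above suppresses a log line when any allow-list entry for that log matches it; the entry's module and message are stitched into a single regular expression. A small self-contained illustration (the entry and log line are made up):

    import re

    entry = {'module': 'oslo.messaging', 'message': 'Connection reset'}
    line = ('2021-09-08 12:00:00 ERROR oslo.messaging [req-123-abc] '
            'Connection reset by peer')

    pattern = '.*%s.*%s.*' % (entry['module'].replace('.', '\\.'),
                              entry['message'])
    allowed = bool(re.match(pattern, line))  # True -> not counted as a failure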
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 64adcbe..b96bbe4 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -32,23 +32,51 @@
 
 # List of projects having tempest plugin stale or unmaintained for a long time
 # (6 months or more)
-# TODO(masayukig): Some of these can be removed from BLACKLIST in the future
-# when the patches are merged.
-BLACKLIST = [
-    'x/gce-api',  # It looks gce-api doesn't support python3 yet.
-    'x/group-based-policy',  # It looks this doesn't support python3 yet.
-    'x/intel-nfv-ci-tests',  # https://review.opendev.org/#/c/634640/
+# TODO(masayukig): Some of these can be removed from NON_ACTIVE_LIST in the
+# future when the patches are merged.
+NON_ACTIVE_LIST = [
+    'x/gce-api',  # It looks gce-api doesn't support python3 yet
+    # https://bugs.launchpad.net/gce-api/+bug/1931094
+    'x/glare',  # To avoid sanity-job failure
+    'x/group-based-policy',
+    # https://bugs.launchpad.net/group-based-policy/+bug/1931091
+    'x/intel-nfv-ci-tests',  # To avoid sanity-job failure
     'openstack/networking-generic-switch',
+    # This is not a real tempest plugin,
     # https://review.opendev.org/#/c/634846/
-    'openstack/networking-l2gw-tempest-plugin',
-    # https://review.opendev.org/#/c/635093/
-    'openstack/networking-midonet',  # https://review.opendev.org/#/c/635096/
-    'x/networking-plumgrid',  # https://review.opendev.org/#/c/635096/
+    'x/networking-plumgrid',  # No longer contains tempest tests
     'x/networking-spp',  # https://review.opendev.org/#/c/635098/
+    # networking-spp is missing neutron-tempest-plugin as a dep, plus its
+    # test-requirements.txt is nested in an openstack dir and the sanity
+    # script doesn't handle that layout yet
     'openstack/neutron-dynamic-routing',
+    # As tests have been migrated to neutron-tempest-plugin:
     # https://review.opendev.org/#/c/637718/
-    'openstack/neutron-vpnaas',  # https://review.opendev.org/#/c/637719/
-    'x/valet',  # https://review.opendev.org/#/c/638339/
+    'openstack/neutron-vpnaas',
+    # As tests have been migrated to neutron-tempest-plugin:
+    # https://review.opendev.org/c/openstack/neutron-vpnaas/+/695834
+    'x/valet',  # valet is unmaintained now
+    # https://review.opendev.org/c/x/valet/+/638339
+    'x/kingbird',  # kingbird is unmaintained now
+    # https://bugs.launchpad.net/kingbird/+bug/1869722
+    'x/mogan',
+    # mogan is unmaintained now, remove from the list when this is merged:
+    # https://review.opendev.org/c/x/mogan/+/767718
+    'x/vmware-nsx-tempest-plugin',
+    # Failing since 2021-08-27
+    # https://zuul.opendev.org/t/openstack/build
+    # /45f6c8d3c62d4387a70b7b471ec687c8
+    # Below plugins failing for error in psycopg2 __init__
+    # ImportError: libpq.so.5: cannot open shared object
+    # file: No such file or directory
+    # https://zuul.opendev.org/t/openstack/build
+    # /b61a48196dfa476d83645aea4853e544/log/job-output.txt#271722
+    # Failing since 2021-09-08
+    'x/networking-l2gw-tempest-plugin',
+    'x/novajoin-tempest-plugin',
+    'x/ranger-tempest-plugin',
+    'x/tap-as-a-service-tempest-plugin',
+    'x/trio2o',
 ]
 
 url = 'https://review.opendev.org/projects/'
@@ -81,10 +109,10 @@
         False
 
 
-if len(sys.argv) > 1 and sys.argv[1] == 'blacklist':
-    for black_plugin in BLACKLIST:
-        print(black_plugin)
-    # We just need BLACKLIST when we use this `blacklist` option.
+if len(sys.argv) > 1 and sys.argv[1] == 'nonactivelist':
+    for non_active_plugin in NON_ACTIVE_LIST:
+        print(non_active_plugin)
+    # We just need NON_ACTIVE_LIST when we use this `nonactivelist` option.
     # So, this exits here.
     sys.exit()
 
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index 961cd09..4430bbf 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -81,25 +81,25 @@
 
 printf "\n\n"
 
-# Print BLACKLIST
-if [[ -r doc/source/data/tempest-blacklisted-plugins-registry.header ]]; then
-    cat doc/source/data/tempest-blacklisted-plugins-registry.header
+# Print NON_ACTIVE_LIST
+if [[ -r doc/source/data/tempest-non-active-plugins-registry.header ]]; then
+    cat doc/source/data/tempest-non-active-plugins-registry.header
 fi
 
-blacklist=$(python tools/generate-tempest-plugins-list.py blacklist)
-name_col_len=$(echo "${blacklist}" | wc -L)
+nonactivelist=$(python tools/generate-tempest-plugins-list.py nonactivelist)
+name_col_len=$(echo "${nonactivelist}" | wc -L)
 name_col_len=$(( name_col_len + 20 ))
 
 printf "\n\n"
-print_plugin_table "${blacklist}"
+print_plugin_table "${nonactivelist}"
 
 printf "\n\n"
 
 if [[ -r doc/source/data/tempest-plugins-registry.footer ]]; then
     cat doc/source/data/tempest-plugins-registry.footer
 fi
-) > doc/source/plugin-registry.rst
+) > doc/source/plugins/plugin-registry.rst
 
 if [[ -n ${1} ]]; then
-    cp doc/source/plugin-registry.rst ${1}/doc/source/plugin-registry.rst
+    cp doc/source/plugins/plugin-registry.rst ${1}/doc/source/plugins/plugin-registry.rst
 fi
diff --git a/tools/tempest-integrated-gate-compute-blacklist.txt b/tools/tempest-integrated-gate-compute-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-compute-blacklist.txt
rename to tools/tempest-integrated-gate-compute-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-networking-blacklist.txt b/tools/tempest-integrated-gate-networking-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-networking-blacklist.txt
rename to tools/tempest-integrated-gate-networking-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-object-storage-blacklist.txt b/tools/tempest-integrated-gate-object-storage-exclude-list.txt
similarity index 77%
rename from tools/tempest-integrated-gate-object-storage-blacklist.txt
rename to tools/tempest-integrated-gate-object-storage-exclude-list.txt
index 064cf46..c164343 100644
--- a/tools/tempest-integrated-gate-object-storage-blacklist.txt
+++ b/tools/tempest-integrated-gate-object-storage-exclude-list.txt
@@ -9,9 +9,10 @@
 tempest.api.identity
 
 # Skip network, compute, keystone only scenario tests
-tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_network_advanced_server_ops
-tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
-tempest.scenario.test_network_v6.TestGettingAddress.test_security_groups_basic_ops
+tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps
+tempest.scenario.test_network_v6.TestGettingAddress
+tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps
 tempest.scenario.test_server_advanced_ops.TestServerAdvancedOps.test_server_sequence_suspend_resume
 tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
 tempest.scenario.test_server_multinode.TestServerMultinode.test_schedule_to_all_nodes
diff --git a/tools/tempest-integrated-gate-placement-blacklist.txt b/tools/tempest-integrated-gate-placement-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-placement-blacklist.txt
rename to tools/tempest-integrated-gate-placement-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-blacklist.txt
new file mode 120000
index 0000000..2d691f8
--- /dev/null
+++ b/tools/tempest-integrated-gate-storage-blacklist.txt
@@ -0,0 +1 @@
+tempest-integrated-gate-storage-exclude-list.txt
\ No newline at end of file
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-exclude-list.txt
similarity index 67%
rename from tools/tempest-integrated-gate-storage-blacklist.txt
rename to tools/tempest-integrated-gate-storage-exclude-list.txt
index 3900f96..1ef6bb5 100644
--- a/tools/tempest-integrated-gate-storage-blacklist.txt
+++ b/tools/tempest-integrated-gate-storage-exclude-list.txt
@@ -8,6 +8,7 @@
 tempest.api.identity
 
 # Skip network only scenario tests.
-tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps.test_network_advanced_server_ops
-tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
-tempest.scenario.test_network_v6.TestGettingAddress.test_security_groups_basic_ops
+tempest.scenario.test_network_advanced_server_ops.TestNetworkAdvancedServerOps
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps
+tempest.scenario.test_network_v6.TestGettingAddress
+tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index b484a41..106a9c6 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -44,7 +44,7 @@
 # retrieve a list of projects having tempest plugins
 PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
 
-BLACKLIST="$(python tools/generate-tempest-plugins-list.py blacklist)"
+NON_ACTIVE_LIST="$(python tools/generate-tempest-plugins-list.py nonactivelist)"
 
 # Function to clone project using zuul-cloner or from git
 function clone_project {
@@ -60,13 +60,13 @@
     fi
 }
 
-: ${UPPER_CONSTRAINTS_FILE:="https://releases.openstack.org/constraints/upper/master"}
-DEPS="-c${UPPER_CONSTRAINTS_FILE}"
+: ${TOX_CONSTRAINTS_FILE:="https://releases.openstack.org/constraints/upper/master"}
+DEPS="-c${TOX_CONSTRAINTS_FILE}"
 
 # function to create virtualenv to perform sanity operation
 function prepare_workspace {
     SANITY_DIR=$(pwd)
-    virtualenv -p python3 --clear "$SANITY_DIR"/.venv
+    python3 -m venv "$SANITY_DIR"/.venv
     export TVENV="$SANITY_DIR/tools/with_venv.sh"
     cd "$SANITY_DIR"
 
@@ -117,8 +117,8 @@
 failed_plugin=''
 # Perform sanity on all tempest plugin projects
 for project in $PROJECT_LIST; do
-    # Remove blacklisted tempest plugins
-    if ! [[ `echo $BLACKLIST | grep -c $project ` -gt 0 ]]; then
+    # Remove non-active tempest plugins
+    if ! [[ `echo $NON_ACTIVE_LIST | grep -c $project ` -gt 0 ]]; then
         plugin_sanity_check $project && passed_plugin+=", $project" || \
         failed_plugin+="$project, " > $SANITY_DIR/$project.txt
     fi
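After this change the sanity script's workflow amounts to: build a throwaway virtualenv with the stdlib venv module, install each plugin under the constraints file, and skip anything on the non-active list. A condensed sketch of that loop, using the script's own variable and function names (illustration only, not a drop-in replacement):

    # Condensed sketch of tools/tempest-plugin-sanity.sh after this change
    SANITY_DIR=$(pwd)
    python3 -m venv "$SANITY_DIR"/.venv          # stdlib venv instead of virtualenv
    NON_ACTIVE_LIST="$(python tools/generate-tempest-plugins-list.py nonactivelist)"
    for project in $PROJECT_LIST; do
        # skip plugins flagged as non-active instead of "blacklisted"
        if ! [[ $(echo $NON_ACTIVE_LIST | grep -c $project) -gt 0 ]]; then
            plugin_sanity_check $project
        fi
    done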
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
index 2596395..bfb1403 100755
--- a/tools/verify-ipv6-only-deployments.sh
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -1,92 +1,8 @@
 #!/bin/bash
-#
-#
-# NOTE(gmann): This script is used in 'devstack-tempest-ipv6' zuul job to verify that
-# services are deployed on IPv6 properly or not. This will capture if any devstck or devstack
-# plugins are missing the required setting to listen on IPv6 address. This is run as part of
-# run phase of zuul job and before test run. Child job of 'devstack-tempest-ipv6'
-# can expand the IPv6 verification specific to project by defining the new post-run script which
-# will run along with this base script.
-# If there are more common verification for IPv6 then we can always extent this script.
 
-# Keep track of the DevStack directory
-TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd)
-source $TOP_DIR/stackrc
-source $TOP_DIR/openrc admin admin
+# NOTE(yoctozepto): This script now lives in devstack, where it belongs.
+# It is kept here for the legacy (dsvm) jobs which still look for it in tempest.
+# TODO: Drop it when no legacy jobs use the master tempest.
 
-function verify_devstack_ipv6_setting {
-    local _service_host=''
-    _service_host=$(echo $SERVICE_HOST | tr -d [])
-    local _host_ipv6=''
-    _host_ipv6=$(echo $HOST_IPV6 | tr -d [])
-    local _service_listen_address=''
-    _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d [])
-    local _service_local_host=''
-    _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d [])
-    if [[ "$SERVICE_IP_VERSION" != 6 ]]; then
-        echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address."
-        exit 1
-    fi
-    is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))')
-    if [[ "$is_service_host_ipv6" != "True" ]]; then
-        echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
-        exit 1
-    fi
-    is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))')
-    if [[ "$is_host_ipv6" != "True" ]]; then
-        echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address."
-        exit 1
-    fi
-    is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))')
-    if [[ "$is_service_listen_address" != "True" ]]; then
-        echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address."
-        exit 1
-    fi
-    is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))')
-    if [[ "$is_service_local_host" != "True" ]]; then
-        echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
-        exit 1
-    fi
-    echo "Devstack is properly configured with IPv6"
-    echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST
-}
-
-function sanity_check_system_ipv6_enabled {
-    system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())')
-    if [[ $system_ipv6_enabled != "True" ]]; then
-        echo "IPv6 is disabled in system"
-        exit 1
-    fi
-    echo "IPv6 is enabled in system"
-}
-
-function verify_service_listen_address_is_ipv6 {
-    local endpoints_verified=False
-    local all_ipv6=True
-    endpoints=$(openstack endpoint list -f value -c URL)
-    for endpoint in ${endpoints}; do
-        local endpoint_address=''
-        endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}')
-        endpoint_address=$(echo $endpoint_address | tr -d [])
-        local is_endpoint_ipv6=''
-        is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))')
-        if [[ "$is_endpoint_ipv6" != "True" ]]; then
-            all_ipv6=False
-            echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address."
-            continue
-        fi
-        endpoints_verified=True
-    done
-    if [[ "$all_ipv6" == "False"  ]] || [[ "$endpoints_verified" == "False" ]]; then
-        exit 1
-    fi
-    echo "All services deployed by devstack is on IPv6 endpoints"
-    echo $endpoints
-}
-
-#First thing to verify if system has IPv6 enabled or not
-sanity_check_system_ipv6_enabled
-#Verify whether devstack is configured properly with IPv6 setting
-verify_devstack_ipv6_setting
-#Get all registrfed endpoints by devstack in keystone and verify that each endpoints address is IPv6.
-verify_service_listen_address_is_ipv6
+DEVSTACK_DIR=$(cd $(dirname "$0")/../../devstack && pwd)
+$DEVSTACK_DIR/tools/verify-ipv6-only-deployments.sh
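The replacement script is now a thin forwarder: it resolves the devstack checkout that CI places next to the tempest checkout and executes the real verifier from there, so legacy jobs that still call the tempest copy keep working. A sketch assuming the usual /opt/stack layout (the paths are an assumption for illustration):

    # Hypothetical CI workspace layout assumed by the forwarder:
    #   /opt/stack/devstack/tools/verify-ipv6-only-deployments.sh   <- real script
    #   /opt/stack/tempest/tools/verify-ipv6-only-deployments.sh    <- this forwarder
    cd /opt/stack/tempest
    ./tools/verify-ipv6-only-deployments.sh   # resolves ../../devstack and runs the devstack copy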
diff --git a/tox.ini b/tox.ini
index a8a2297..18f2aa6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
-envlist = pep8,py36,py37,bashate,pip-check-reqs
-minversion = 3.1.1
+envlist = pep8,py36,py39,bashate,pip-check-reqs
+minversion = 3.18.0
 skipsdist = True
 ignore_basepython_conflict = True
 
@@ -24,10 +24,10 @@
     OS_STDERR_CAPTURE=1
     OS_TEST_TIMEOUT=160
     PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST GABBI_TEMPEST_PATH
+passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
 usedevelop = True
 install_command = pip install {opts} {packages}
-whitelist_externals = *
+allowlist_externals = *
 deps =
     -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
@@ -106,7 +106,7 @@
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
-# FIXME: We can replace it with the `--black-regex` option to exclude tests now.
+# FIXME: We can replace it with the `--exclude-regex` option to exclude tests now.
 commands =
     find . -type f -name "*.pyc" -delete
     tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' {posargs}
@@ -123,6 +123,18 @@
     find . -type f -name "*.pyc" -delete
     tempest run --regex '(^tempest\.scenario.*)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)' {posargs}
 
+[testenv:api-microversion-tests]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The regex below is used to select all tempest api tests for services having an
+# API microversion concept.
+commands =
+    find . -type f -name "*.pyc" -delete
+    tempest run --regex '(^tempest\.api\.compute)|(^tempest\.api\.volume)' {posargs}
+
 [testenv:integrated-network]
 envdir = .tox/tempest
 sitepackages = {[tempestenv]sitepackages}
@@ -130,11 +142,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-networking-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-networking-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-networking-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-networking-exclude-list.txt {posargs}
 
 [testenv:integrated-compute]
 envdir = .tox/tempest
@@ -143,11 +155,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-compute-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-compute-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
 
 [testenv:integrated-placement]
 envdir = .tox/tempest
@@ -156,11 +168,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-placement-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-placement-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-placement-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-placement-exclude-list.txt {posargs}
 
 [testenv:integrated-storage]
 envdir = .tox/tempest
@@ -169,11 +181,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-storage-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-storage-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-storage-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-storage-exclude-list.txt {posargs}
 
 [testenv:integrated-object-storage]
 envdir = .tox/tempest
@@ -182,11 +194,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-object-storage-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-object-storage-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-object-storage-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-object-storage-exclude-list.txt {posargs}
 
 [testenv:full-serial]
 envdir = .tox/tempest
@@ -196,7 +208,7 @@
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
-# FIXME: We can replace it with the `--black-regex` option to exclude tests now.
+# FIXME: We can replace it with the `--exclude-regex` option to exclude tests now.
 commands =
     find . -type f -name "*.pyc" -delete
     tempest run --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario))' {posargs}
@@ -277,18 +289,33 @@
 [testenv:docs]
 deps =
   -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/requirements.txt
   -r{toxinidir}/doc/requirements.txt
 commands =
+  sphinx-apidoc -f -o doc/source/tests/compute tempest/api/compute
+  sphinx-apidoc -f -o doc/source/tests/identity tempest/api/identity
+  sphinx-apidoc -f -o doc/source/tests/image tempest/api/image
+  sphinx-apidoc -f -o doc/source/tests/network tempest/api/network
+  sphinx-apidoc -f -o doc/source/tests/object_storage tempest/api/object_storage
+  sphinx-apidoc -f -o doc/source/tests/scenario tempest/scenario
+  sphinx-apidoc -f -o doc/source/tests/volume tempest/api/volume
   rm -rf doc/build
   sphinx-build -W -b html doc/source doc/build/html
-whitelist_externals = rm
+allowlist_externals =
+    rm
 
 [testenv:pdf-docs]
 deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
+   rm
    make
 commands =
+   sphinx-apidoc -f -o doc/source/tests/compute tempest/api/compute
+   sphinx-apidoc -f -o doc/source/tests/identity tempest/api/identity
+   sphinx-apidoc -f -o doc/source/tests/image tempest/api/image
+   sphinx-apidoc -f -o doc/source/tests/network tempest/api/network
+   sphinx-apidoc -f -o doc/source/tests/object_storage tempest/api/object_storage
+   sphinx-apidoc -f -o doc/source/tests/scenario tempest/scenario
+   sphinx-apidoc -f -o doc/source/tests/volume tempest/api/volume
    sphinx-build -W -b latex doc/source doc/build/pdf
    make -C doc/build/pdf
 
@@ -311,7 +338,6 @@
     check-uuid --fix
 
 [hacking]
-local-check-factory = tempest.hacking.checks.factory
 import_exceptions = tempest.services
 
 [flake8]
@@ -319,22 +345,42 @@
 # E123 skipped because it is ignored by default in the default pep8
 # E129 skipped because it is too limiting when combined with other rules
 # W504 skipped because it is overeager and unnecessary
-ignore = E125,E123,E129,W504
+# H405 skipped because it arbitrarily forces docstring "title" lines
+ignore = E125,E123,E129,W504,H405
 show-source = True
 exclude = .git,.venv,.tox,dist,doc,*egg,build
 enable-extensions = H106,H203,H904
 import-order-style = pep8
 
+[flake8:local-plugins]
+extension =
+  T102 = checks:import_no_clients_in_api_and_scenario_tests
+  T104 = checks:scenario_tests_need_service_tags
+  T105 = checks:no_setup_teardown_class_for_tests
+  T107 = checks:service_tags_not_in_module_path
+  T108 = checks:no_hyphen_at_end_of_rand_name
+  N322 = checks:no_mutable_default_args
+  T109 = checks:no_testtools_skip_decorator
+  T110 = checks:get_resources_on_service_clients
+  T111 = checks:delete_resources_on_service_clients
+  T112 = checks:dont_import_local_tempest_into_lib
+  T113 = checks:use_rand_uuid_instead_of_uuid4
+  T114 = checks:dont_use_config_in_tempest_lib
+  T115 = checks:dont_put_admin_tests_on_nonadmin_path
+  T116 = checks:unsupported_exception_attribute_PY3
+  T117 = checks:negative_test_attribute_always_applied_to_negative_tests
+paths =
+  ./tempest/hacking
+
 [testenv:releasenotes]
 deps =
   -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/requirements.txt
   -r{toxinidir}/doc/requirements.txt
 commands =
   rm -rf releasenotes/build
   sphinx-build -a -E -W -d releasenotes/build/doctrees \
          -b html releasenotes/source releasenotes/build/html
-whitelist_externals = rm
+allowlist_externals = rm
 
 [testenv:bashate]
 # if you want to test out some changes you have made to bashate
@@ -342,7 +388,7 @@
 # modified bashate tree
 deps =
    {env:BASHATE_INSTALL_PATH:bashate}
-whitelist_externals = bash
+allowlist_externals = bash
 commands = bash -c "find {toxinidir}/tools    \
          -not \( -type d -name .?\* -prune \) \
          -type f                              \
@@ -371,6 +417,18 @@
 
 [testenv:plugin-sanity-check]
 # perform tempest plugin sanity
-whitelist_externals = bash
+allowlist_externals = bash
 commands =
   bash tools/tempest-plugin-sanity.sh
+
+[testenv:stestr-master]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The commands below install the stestr master version and run smoke tests
+commands =
+    find . -type f -name "*.pyc" -delete
+    pip install -U git+https://github.com/mtreinish/stestr
+    tempest run --regex '\[.*\bsmoke\b.*\]' {posargs}
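For local use, the renamed options and the new environments added above can be exercised directly with tox; the environment names come from this tox.ini, while the TEMPEST_CONFIG_DIR value is only an illustrative assumption:

    # Sketch: running the updated tox environments locally
    export TEMPEST_CONFIG_DIR=/opt/stack/tempest/etc    # assumed location of tempest.conf
    tox -e integrated-compute        # API + scenario runs filtered by the compute exclude list
    tox -e api-microversion-tests    # compute and volume API tests only
    tox -e stestr-master             # installs stestr from git master, then runs smoke tests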
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
new file mode 100644
index 0000000..3deb944
--- /dev/null
+++ b/zuul.d/base.yaml
@@ -0,0 +1,86 @@
+- job:
+    name: devstack-tempest
+    parent: devstack
+    description: |
+      Base Tempest job.
+
+      This Tempest job provides the base for both the single and multi-node
+      test setup. To run a multi-node test inherit from devstack-tempest and
+      set the nodeset to a multi-node one.
+    required-projects: &base_required-projects
+      - opendev.org/openstack/tempest
+    timeout: 7200
+    roles: &base_roles
+      - zuul: opendev.org/openstack/devstack
+    vars: &base_vars
+      devstack_services:
+        tempest: true
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            compute:
+              min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
+      test_results_stage_name: test_results
+      zuul_copy_output:
+        '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
+        '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
+        '{{ devstack_base_dir }}/tempest/tempest.log': logs
+        '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
+        '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
+        '{{ stage_dir }}/stackviz': logs
+      extensions_to_txt:
+        conf: true
+        log: true
+        yaml: true
+        yml: true
+    run: playbooks/devstack-tempest.yaml
+    post-run: playbooks/post-tempest.yaml
+
+- job:
+    name: devstack-tempest-ipv6
+    parent: devstack-ipv6
+    description: |
+      Base Tempest IPv6 job. This job is derived from 'devstack-ipv6'
+      which sets the IPv6-only setting for OpenStack services. As part of the
+      run phase, this job will verify the IPv6 settings and check that the
+      service endpoints and listen addresses are IPv6. Basically it will run
+      the script ./tools/verify-ipv6-only-deployments.sh
+
+      Child jobs of this job can run their own set of tests and can
+      add post-run playbooks to extend the IPv6 verification specific
+      to their deployed services.
+      Check the wiki page for more details about project jobs setup
+      - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
+    required-projects: *base_required-projects
+    timeout: 7200
+    roles: *base_roles
+    vars: *base_vars
+    run: playbooks/devstack-tempest-ipv6.yaml
+    post-run: playbooks/post-tempest.yaml
+
+- job:
+    name: tempest-multinode-full-base
+    parent: devstack-tempest
+    description: |
+      Base multinode integration test with Neutron networking and py27.
+      Former names for this job were:
+        * neutron-tempest-multinode-full
+        * legacy-tempest-dsvm-neutron-multinode-full
+        * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
+      This job includes two nodes, controller / tempest plus a subnode, but
+      it can be used with different topologies, as long as a controller node
+      and a tempest one exist.
+    timeout: 10800
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        FORCE_CONFIG_DRIVE: false
+        NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+        LIVE_MIGRATION_AVAILABLE: true
+        USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+    group-vars:
+      peers:
+        devstack_localrc:
+          NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+          LIVE_MIGRATION_AVAILABLE: true
+          USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
new file mode 100644
index 0000000..753b8fe
--- /dev/null
+++ b/zuul.d/integrated-gate.yaml
@@ -0,0 +1,388 @@
+# NOTE(gmann): This file includes all integrated jobs definition which
+# are supposed to be run by Tempest and other projects as
+# integrated testing.
+- job:
+    name: tempest-all
+    parent: devstack-tempest
+    description: |
+      Integration test that runs all tests.
+      Former name for this job was:
+        * legacy-periodic-tempest-dsvm-all-master
+    vars:
+      tox_envlist: all
+      tempest_test_regex: tempest
+      # TODO(gmann): Enable File injection tests once nova bug is fixed
+      # https://bugs.launchpad.net/nova/+bug/1882421
+      # devstack_localrc:
+      #   ENABLE_FILE_INJECTION: true
+
+- job:
+    name: tempest-ipv6-only
+    parent: devstack-tempest-ipv6
+    # This currently works from stable/pike on.
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Integration test of IPv6-only deployments. This job runs
+      smoke and IPv6-related tests only. The basic idea is to test
+      whether OpenStack services listen on an IPv6 address or not.
+    timeout: 10800
+    vars:
+      tox_envlist: ipv6-only
+
+- job:
+    name: tempest-full
+    parent: devstack-tempest
+    # This currently works from stable/pike on.
+    # Before stable/pike, legacy version of tempest-full
+    # 'legacy-tempest-dsvm-neutron-full' run.
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Base integration test with Neutron networking and py27.
+      This job is supposed to run on stable/train and earlier only.
+      If you are running it on the stable/ussuri gate onwards for python2.7
+      coverage, then you need to do an override-checkout with a stable
+      branch less than or equal to stable/train.
+      Former names for this job were:
+        * legacy-tempest-dsvm-neutron-full
+        * gate-tempest-dsvm-neutron-full-ubuntu-xenial
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        ENABLE_FILE_INJECTION: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        USE_PYTHON3: False
+      devstack_services:
+        # NOTE(mriedem): Disable the cinder-backup service from tempest-full
+        # since tempest-full is in the integrated-gate project template but
+        # the backup tests do not really involve other services so they should
+        # be run in some more cinder-specific job, especially because the
+        # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
+        c-bak: false
+
+- job:
+    name: tempest-full-py3
+    parent: devstack-tempest
+    # This job version is with swift enabled on py3
+    # as swift is ready on py3 from stable/ussuri onwards.
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    description: |
+      Base integration test with Neutron networking, horizon, swift enabled,
+      and py3.
+      Former names for this job were:
+        * legacy-tempest-dsvm-py35
+        * gate-tempest-dsvm-py35
+    required-projects:
+      - openstack/horizon
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        # Enable horizon so that we can run horizon tests.
+        horizon: true
+
+- job:
+    name: tempest-integrated-networking
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for networking. This is a subset of
+      the 'tempest-full-py3' job and runs only Neutron and Nova related tests.
+      It is meant to be run on the neutron gate only.
+    vars:
+      tox_envlist: integrated-network
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-compute
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for compute. This is
+      a subset of the 'tempest-full-py3' job and runs Nova, Neutron, Cinder (except backup tests)
+      and Glance related tests. It is meant to be run on the Nova gate only.
+    vars:
+      tox_envlist: integrated-compute
+      tempest_exclude_regex: ""
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-compute-centos-8-stream
+    parent: tempest-integrated-compute
+    nodeset: devstack-single-node-centos-8-stream
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria)).*$
+    description: |
+      This job runs integration tests for compute. This is
+      a subset of the 'tempest-full-py3' job and runs Nova, Neutron, Cinder (except backup tests)
+      and Glance related tests. It is meant to be run on the Nova gate only.
+      This version of the job also uses CentOS 8 stream.
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
+
+- job:
+    name: tempest-integrated-placement
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for placement. This is
+      a subset of the 'tempest-full-py3' job and runs Nova and Neutron
+      related tests. It is meant to be run on the Placement gate only.
+    vars:
+      tox_envlist: integrated-placement
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-storage
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for image & block storage. This is
+      a subset of the 'tempest-full-py3' job and runs Cinder, Glance, Swift and Nova
+      related tests. It is meant to be run on the Cinder and Glance gates only.
+    vars:
+      tox_envlist: integrated-storage
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+
+- job:
+    name: tempest-integrated-object-storage
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for object storage. This is
+      a subset of the 'tempest-full-py3' job and runs Swift, Cinder and Glance
+      related tests. It is meant to be run on the Swift gate only.
+    vars:
+      tox_envlist: integrated-object-storage
+      devstack_localrc:
+        # NOTE(gmann): swift is not ready on python3 yet and devstack
+        # installs it on python2.7 only. But we set USE_PYTHON3 here
+        # for the future, once swift is ready on py3.
+        USE_PYTHON3: true
+
+- job:
+    name: tempest-with-latest-microversion
+    parent: tempest-full-py3
+    description: |
+      This job runs compute, placement and volume API tests with 'latest'
+      API microversion (this can be extended to other services having an API
+      microversion concept).
+    vars:
+      tox_envlist: api-microversion-tests
+      devstack_localrc:
+        TEMPEST_COMPUTE_MIN_MICROVERSION: 'latest'
+        TEMPEST_VOLUME_MIN_MICROVERSION: 'latest'
+        TEMPEST_PLACEMENT_MIN_MICROVERSION: 'latest'
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-focal
+    # This job runs on Focal from stable/victoria on.
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-multinode-full-py3
+    parent: tempest-multinode-full
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-trunk: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+
+- job:
+    name: tempest-slow
+    parent: tempest-multinode-full
+    description: |
+      This multinode integration job will run all the tests tagged as slow.
+      It enables the lvm multibackend setup to cover a few scenario tests.
+      This job will run only slow tests (API or Scenario) serially.
+
+      Former names for this job were:
+        * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
+        * tempest-scenario-multinode-lvm-multibackend
+    timeout: 10800
+    # This job runs on stable/stein onwards.
+    branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
+    vars: &tempest_slow_vars
+      tox_envlist: slow-serial
+      devstack_localrc:
+        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-placement: true
+        neutron-qos: true
+      tempest_concurrency: 2
+    group-vars:
+      # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+      # the controller and subnode prior to Rocky so we have to make sure the
+      # variable is set in both locations.
+      subnode:
+        devstack_localrc:
+          ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+    name: tempest-slow-py3
+    parent: tempest-multinode-full-py3
+    # This job version is with swift enabled on py3
+    # as swift is ready on py3 from stable/ussuri onwards.
+    timeout: 10800
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    vars: *tempest_slow_vars
+
+- job:
+    name: tempest-cinder-v2-api
+    parent: devstack-tempest
+    # NOTE(gmann): Cinder v2 APIs are available until
+    # stable/wallaby only.
+    override-checkout: stable/wallaby
+    description: |
+      This job runs the cinder API test against v2 endpoint.
+    vars:
+      tox_envlist: all
+      tempest_test_regex: api.*volume
+      devstack_localrc:
+        TEMPEST_VOLUME_TYPE: volumev2
+
+- job:
+    name: tempest-pg-full
+    parent: tempest-full-py3
+    description: |
+      Base integration test with Neutron networking and PostgreSQL.
+      Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
+    vars:
+      devstack_localrc:
+        # TODO(gmann): Enable File injection tests once nova bug is fixed
+        # https://bugs.launchpad.net/nova/+bug/1882421
+        # ENABLE_FILE_INJECTION: true
+        DATABASE_TYPE: postgresql
+
+- project-template:
+    name: integrated-gate-networking
+    description: |
+      Run the python3 Tempest network integration tests (Nova and Neutron related)
+      in check and gate for the neutron integrated gate. This is meant to be
+      run on neutron gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-networking
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-networking
+
+- project-template:
+    name: integrated-gate-compute
+    description: |
+      Run the python3 Tempest compute integration tests
+      (Nova, Neutron, Cinder and Glance related) in check and gate
+      for the Nova integrated gate. This is meant to be
+      run on Nova gate only.
+    check:
+      jobs:
+        - tempest-integrated-compute
+        - tempest-integrated-compute-centos-8-stream
+    gate:
+      jobs:
+        - tempest-integrated-compute
+        - tempest-integrated-compute-centos-8-stream
+
+- project-template:
+    name: integrated-gate-placement
+    description: |
+      Run the python3 Tempest placement integration tests
+      (Nova and Neutron related) in check and gate
+      for the Placement integrated gate. This is meant to be
+      run on Placement gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-placement
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-placement
+
+- project-template:
+    name: integrated-gate-storage
+    description: |
+      Run the python3 Tempest image & block storage integration tests
+      (Cinder, Glance, Swift and Nova related) in check and gate
+      for the neutron integrated gate. This is meant to be
+      run on Cinder and Glance gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-storage
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-storage
+
+- project-template:
+    name: integrated-gate-object-storage
+    description: |
+      Run the python3 Tempest object storage integration tests
+      (Swift, Cinder and Glance related) in check and gate
+      for the swift integrated gate. This is meant to be
+      run on swift gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-object-storage
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-object-storage
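These project-templates are meant to be consumed by the service projects themselves. A minimal sketch of how a consuming project would opt in from its own Zuul configuration (the consumer stanza below is hypothetical, not part of this change):

    # Hypothetical consumer .zuul.yaml in a service project
    - project:
        templates:
          - integrated-gate-storage   # adds grenade and tempest-integrated-storage to check and gate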
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
new file mode 100644
index 0000000..36f4920
--- /dev/null
+++ b/zuul.d/project.yaml
@@ -0,0 +1,177 @@
+- project:
+    templates:
+      - check-requirements
+      - integrated-gate-py3
+      - openstack-cover-jobs
+      - publish-openstack-docs-pti
+      - release-notes-jobs-python3
+    check:
+      jobs:
+        - openstack-tox-pep8
+        - openstack-tox-py36
+        - openstack-tox-py37
+        - openstack-tox-py38
+        - openstack-tox-py39
+        - tempest-full-parallel:
+            # Define list of irrelevant files to use everywhere else
+            irrelevant-files: &tempest-irrelevant-files
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^tools/.*$
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
+        - tempest-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-ipv6:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - glance-multistore-cinder-import:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-xena:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-wallaby-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-victoria-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-ussuri-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-train-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-multinode-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-tox-plugin-sanity-check:
+            irrelevant-files: &tempest-irrelevant-files-2
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
+              - ^tools/check_logs.py
+              - ^tools/format.sh
+              - ^tools/skip_tracker.py
+              - ^tools/tempest-integrated-gate-compute-exclude-list.txt
+              - ^tools/tempest-integrated-gate-networking-exclude-list.txt
+              - ^tools/tempest-integrated-gate-object-storage-exclude-list.txt
+              - ^tools/tempest-integrated-gate-placement-exclude-list.txt
+              - ^tools/tempest-integrated-gate-storage-blacklist.txt
+              - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+              - ^tools/verify-ipv6-only-deployments.sh
+              - ^tools/with_venv.sh
+              # tools/ is not here since this relies on a script in tools/.
+        - tempest-ipv6-only:
+            irrelevant-files: &tempest-irrelevant-files-3
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^tools/check_logs.py
+              - ^tools/format.sh
+              - ^tools/generate-tempest-plugins-list.py
+              - ^tools/generate-tempest-plugins-list.sh
+              - ^tools/skip_tracker.py
+              - ^tools/tempest-integrated-gate-compute-exclude-list.txt
+              - ^tools/tempest-integrated-gate-networking-exclude-list.txt
+              - ^tools/tempest-integrated-gate-object-storage-exclude-list.txt
+              - ^tools/tempest-integrated-gate-placement-exclude-list.txt
+              - ^tools/tempest-integrated-gate-storage-blacklist.txt
+              - ^tools/tempest-integrated-gate-storage-exclude-list.txt
+              - ^tools/tempest-plugin-sanity.sh
+              - ^tools/with_venv.sh
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
+        - tempest-slow-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - nova-live-migration:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-ovs-grenade-multinode:
+            irrelevant-files: *tempest-irrelevant-files
+        - grenade:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-ovs-tempest-dvr:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - interop-tempest-consistency:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-test-account-py3:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-test-account-no-admin-py3:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - openstack-tox-bashate:
+            irrelevant-files: *tempest-irrelevant-files-2
+        - tempest-full-py3-centos-8-stream:
+            irrelevant-files: *tempest-irrelevant-files
+    gate:
+      jobs:
+        - openstack-tox-pep8
+        - openstack-tox-py36
+        - openstack-tox-py37
+        - openstack-tox-py38
+        - openstack-tox-py39
+        - tempest-slow-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-ovs-grenade-multinode:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-centos-8-stream:
+            irrelevant-files: *tempest-irrelevant-files
+        - grenade:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-ipv6-only:
+            irrelevant-files: *tempest-irrelevant-files-3
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files: *tempest-irrelevant-files
+    experimental:
+      jobs:
+        - tempest-with-latest-microversion
+        - tempest-stestr-master
+        - tempest-cinder-v2-api:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-all:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-ovs-tempest-dvr-ha-multinode-full:
+            irrelevant-files: *tempest-irrelevant-files
+        - nova-tempest-v2-api:
+            irrelevant-files: *tempest-irrelevant-files
+        - cinder-tempest-lvm-multibackend:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-pg-full:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-opensuse15:
+            irrelevant-files: *tempest-irrelevant-files
+    periodic-stable:
+      jobs:
+        - tempest-full-xena
+        - tempest-full-wallaby-py3
+        - tempest-full-victoria-py3
+        - tempest-full-ussuri-py3
+        - tempest-full-train-py3
+    periodic:
+      jobs:
+        - tempest-all
+        - tempest-full-oslo-master
+        - tempest-stestr-master
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
new file mode 100644
index 0000000..e682457
--- /dev/null
+++ b/zuul.d/stable-jobs.yaml
@@ -0,0 +1,186 @@
+# NOTE(gmann): This file includes all stable release jobs definition.
+- job:
+    name: tempest-full-xena
+    parent: tempest-full-py3
+    override-checkout: stable/xena
+
+- job:
+    name: tempest-full-wallaby-py3
+    parent: tempest-full-py3
+    override-checkout: stable/wallaby
+
+- job:
+    name: tempest-full-victoria-py3
+    parent: tempest-full-py3
+    override-checkout: stable/victoria
+
+- job:
+    name: tempest-full-ussuri-py3
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-bionic
+    override-checkout: stable/ussuri
+
+- job:
+    name: tempest-full-train-py3
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-bionic
+    override-checkout: stable/train
+
+- job:
+    name: tempest-full-py3
+    parent: devstack-tempest
+    # This job version is with swift disabled on py3
+    # as swift was not ready on py3 until stable/train.
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+      - stable/stein
+      - stable/train
+    description: |
+      Base integration test with Neutron networking, swift disabled, and py3.
+      Former names for this job were:
+        * legacy-tempest-dsvm-py35
+        * gate-tempest-dsvm-py35
+    required-projects:
+      - openstack/horizon
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_local_conf:
+        post-config:
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            ovs:
+              bridge_mappings: public:br-ex
+              resource_provider_bandwidths: br-ex:1000000:1000000
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              qos_placement_physnet: public
+      devstack_services:
+        # Enable horizon so that we can run horizon tests.
+        horizon: true
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        # NOTE(mriedem): Disable the cinder-backup service from
+        # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+        # project template but the backup tests do not really involve other
+        # services so they should be run in some more cinder-specific job,
+        # especially because the tests fail at a high rate (see bugs 1483434,
+        # 1813217, 1745168)
+        c-bak: false
+        neutron-placement: true
+        neutron-qos: true
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-bionic
+    # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
+    # This job is prepared to make sure all stable branches from stable/stein till stable/train
+    # will keep running on bionic. This can be removed once stable/train is EOL.
+    branches:
+      - stable/stein
+      - stable/train
+      - stable/ussuri
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-xenial
+    # This job runs on Xenial and this is for stable/pike, stable/queens
+    # and stable/rocky. This job is prepared to make sure all stable branches
+    # before stable/stein will keep running on xenial. This job can be
+    # removed once stable/rocky is EOL.
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-slow
+    parent: tempest-multinode-full
+    description: |
+      This multinode integration job will run all the tests tagged as slow.
+      It enables the lvm multibackend setup to cover a few scenario tests.
+      This job will run only slow tests (API or Scenario) serially.
+      Former names for this job were:
+        * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
+        * tempest-scenario-multinode-lvm-multibackend
+    timeout: 10800
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+    vars:
+      tox_envlist: slow-serial
+      devstack_localrc:
+        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+        ENABLE_VOLUME_MULTIATTACH: true
+        # to avoid https://bugs.launchpad.net/neutron/+bug/1914037
+        # as we couldn't backport the fix to rocky and older releases
+        IPV6_PUBLIC_RANGE: 2001:db8:0:10::/64
+        IPV6_PUBLIC_NETWORK_GATEWAY: 2001:db8:0:10::2
+        IPV6_ROUTER_GW_IP: 2001:db8:0:10::1
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-placement: true
+        neutron-qos: true
+      tempest_concurrency: 2
+    group-vars:
+      # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+      # the controller and subnode prior to Rocky so we have to make sure the
+      # variable is set in both locations.
+      subnode:
+        devstack_localrc:
+          ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+    name: tempest-slow-py3
+    parent: tempest-slow
+    # This job version is with swift disabled on py3
+    # as swift was not ready on py3 until stable/train.
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+      - stable/stein
+      - stable/train
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        c-bak: false
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
new file mode 100644
index 0000000..051d8b0
--- /dev/null
+++ b/zuul.d/tempest-specific.yaml
@@ -0,0 +1,143 @@
+# NOTE(gmann): This file includes all tempest specific jobs definition which
+# are supposed to be run by Tempest gate only.
+- job:
+    name: tempest-full-oslo-master
+    parent: tempest-full-py3
+    description: |
+      Integration test using current git of oslo libs.
+      This ensures that when oslo libs get released that they
+      do not break OpenStack server projects.
+
+      Former name for this job was
+      periodic-tempest-dsvm-oslo-latest-full-master.
+    timeout: 10800
+    required-projects:
+      - opendev.org/openstack/oslo.cache
+      - opendev.org/openstack/oslo.concurrency
+      - opendev.org/openstack/oslo.config
+      - opendev.org/openstack/oslo.context
+      - opendev.org/openstack/oslo.db
+      - opendev.org/openstack/oslo.i18n
+      - opendev.org/openstack/oslo.log
+      - opendev.org/openstack/oslo.messaging
+      - opendev.org/openstack/oslo.middleware
+      - opendev.org/openstack/oslo.policy
+      - opendev.org/openstack/oslo.privsep
+      - opendev.org/openstack/oslo.reports
+      - opendev.org/openstack/oslo.rootwrap
+      - opendev.org/openstack/oslo.serialization
+      - opendev.org/openstack/oslo.service
+      - opendev.org/openstack/oslo.utils
+      - opendev.org/openstack/oslo.versionedobjects
+      - opendev.org/openstack/oslo.vmware
+
+- job:
+    name: tempest-full-parallel
+    parent: tempest-full-py3
+    voting: false
+    branches:
+      - master
+    description: |
+      Base integration test with Neutron networking.
+      It includes all scenario tests, as it did in the past.
+      This job runs all scenario tests in parallel!
+    timeout: 9000
+    vars:
+      tox_envlist: full-parallel
+      run_tempest_cleanup: true
+      run_tempest_dry_cleanup: true
+      devstack_localrc:
+        DEVSTACK_PARALLEL: True
+
+- job:
+    name: tempest-full-py3-ipv6
+    parent: devstack-tempest-ipv6
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Base integration test with Neutron networking, IPv6 and py3.
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        c-bak: false
+
+- job:
+    name: tempest-full-py3-opensuse15
+    parent: tempest-full-py3
+    nodeset: devstack-single-node-opensuse-15
+    description: |
+      Base integration test with Neutron networking and py36 running
+      on openSUSE Leap 15.x
+    voting: false
+
+- job:
+    name: tempest-full-py3-centos-8-stream
+    parent: tempest-full-py3
+    nodeset: devstack-single-node-centos-8-stream
+    description: |
+      Base integration test with Neutron networking and py36 running
+      on CentOS 8 stream
+    vars:
+      # Required until bug/1949606 is resolved when using libvirt and QEMU
+      # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
+      configure_swap_size: 4096
+
+- job:
+    name: tempest-tox-plugin-sanity-check
+    parent: tox
+    description: |
+      Run tempest plugin sanity check script using tox.
+    nodeset: ubuntu-focal
+    vars:
+      tox_envlist: plugin-sanity-check
+    timeout: 5000
+
+- job:
+    name: tempest-full-test-account-py3
+    parent: tempest-full-py3
+    description: |
+      This job runs the full set of tempest tests using pre-provisioned
+      credentials instead of dynamic credentials and py3.
+      Former names for this job were:
+        - legacy-tempest-dsvm-full-test-accounts
+        - legacy-tempest-dsvm-neutron-full-test-accounts
+        - legacy-tempest-dsvm-identity-v3-test-accounts
+    vars:
+      devstack_localrc:
+        TEMPEST_USE_TEST_ACCOUNTS: True
+
+- job:
+    name: tempest-full-test-account-no-admin-py3
+    parent: tempest-full-test-account-py3
+    description: |
+      This job runs the full set of tempest tests using pre-provisioned
+      credentials and py3 without having an admin account.
+      Former name for this job was:
+        - legacy-tempest-dsvm-neutron-full-non-admin
+
+    vars:
+      devstack_localrc:
+        TEMPEST_HAS_ADMIN: False
+
+- job:
+    name: tempest-stestr-master
+    parent: devstack-tempest
+    description: |
+      Smoke integration test with stestr master.
+      This ensures that a new stestr release does
+      not break Tempest.
+    vars:
+      tox_envlist: stestr-master
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false