Merge "Add volume services schema for microversion 3.7"
diff --git a/.zuul.yaml b/.zuul.yaml
deleted file mode 100644
index 9c53ba9..0000000
--- a/.zuul.yaml
+++ /dev/null
@@ -1,748 +0,0 @@
-- job:
-    name: devstack-tempest
-    parent: devstack
-    description: |
-      Base Tempest job.
-
-      This Tempest job provides the base for both the single and multi-node
-      test setup. To run a multi-node test inherit from devstack-tempest and
-      set the nodeset to a multi-node one.
-    required-projects: &base_required-projects
-      - opendev.org/openstack/tempest
-    timeout: 7200
-    roles: &base_roles
-      - zuul: opendev.org/openstack/devstack
-    vars: &base_vars
-      devstack_services:
-        tempest: true
-      devstack_local_conf:
-        test-config:
-          $TEMPEST_CONFIG:
-            compute:
-              min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
-      test_results_stage_name: test_results
-      zuul_copy_output:
-        '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
-        '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
-        '{{ devstack_base_dir }}/tempest/tempest.log': logs
-        '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
-        '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
-        '{{ stage_dir }}/stackviz': logs
-      extensions_to_txt:
-        conf: true
-        log: true
-        yaml: true
-        yml: true
-    run: playbooks/devstack-tempest.yaml
-    post-run: playbooks/post-tempest.yaml
-
-- job:
-    name: tempest-all
-    parent: devstack-tempest
-    description: |
-      Integration test that runs all tests.
-      Former name for this job was:
-        * legacy-periodic-tempest-dsvm-all-master
-    vars:
-      tox_envlist: all
-      tempest_test_regex: tempest
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-
-- job:
-    name: devstack-tempest-ipv6
-    parent: devstack-ipv6
-    description: |
-      Base Tempest IPv6 job. This job is derived from 'devstack-ipv6'
-      which set the IPv6-only setting for OpenStack services. As part of
-      run phase, this job will verify the IPv6 setting and check the services
-      endpoints and listen addresses are IPv6. Basically it will run the script
-      ./tool/verify-ipv6-only-deployments.sh
-
-      Child jobs of this job can run their own set of tests and can
-      add post-run playebooks to extend the IPv6 verification specific
-      to their deployed services.
-      Check the wiki page for more details about project jobs setup
-      - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
-    required-projects: *base_required-projects
-    timeout: 7200
-    roles: *base_roles
-    vars: *base_vars
-    run: playbooks/devstack-tempest-ipv6.yaml
-    post-run: playbooks/post-tempest.yaml
-
-- job:
-    name: tempest-ipv6-only
-    parent: devstack-tempest-ipv6
-    # This currently works from stable/pike on.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Integration test of IPv6-only deployments. This job runs
-      smoke and IPv6 relates tests only. Basic idea is to test
-      whether OpenStack Services listen on IPv6 addrress or not.
-    timeout: 10800
-    vars:
-      tox_envlist: ipv6-only
-
-- job:
-    name: tempest-full
-    parent: devstack-tempest
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking and py27.
-      Former names for this job where:
-        * legacy-tempest-dsvm-neutron-full
-        * gate-tempest-dsvm-neutron-full-ubuntu-xenial
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        USE_PYTHON3: False
-      devstack_services:
-        # NOTE(mriedem): Disable the cinder-backup service from tempest-full
-        # since tempest-full is in the integrated-gate project template but
-        # the backup tests do not really involve other services so they should
-        # be run in some more cinder-specific job, especially because the
-        # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
-        c-bak: false
-
-- job:
-    name: tempest-full-oslo-master
-    parent: tempest-full
-    description: |
-      Integration test using current git of oslo libs.
-      This ensures that when oslo libs get released that they
-      do not break OpenStack server projects.
-
-      Former name for this job was
-      periodic-tempest-dsvm-oslo-latest-full-master.
-    timeout: 10800
-    required-projects:
-      - opendev.org/openstack/oslo.cache
-      - opendev.org/openstack/oslo.concurrency
-      - opendev.org/openstack/oslo.config
-      - opendev.org/openstack/oslo.context
-      - opendev.org/openstack/oslo.db
-      - opendev.org/openstack/oslo.i18n
-      - opendev.org/openstack/oslo.log
-      - opendev.org/openstack/oslo.messaging
-      - opendev.org/openstack/oslo.middleware
-      - opendev.org/openstack/oslo.policy
-      - opendev.org/openstack/oslo.privsep
-      - opendev.org/openstack/oslo.reports
-      - opendev.org/openstack/oslo.rootwrap
-      - opendev.org/openstack/oslo.serialization
-      - opendev.org/openstack/oslo.service
-      - opendev.org/openstack/oslo.utils
-      - opendev.org/openstack/oslo.versionedobjects
-      - opendev.org/openstack/oslo.vmware
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: True
-
-- job:
-    name: tempest-full-parallel
-    parent: tempest-full
-    voting: false
-    branches:
-      - master
-    description: |
-      Base integration test with Neutron networking.
-      It includes all scenarios as it was in the past.
-      This job runs all scenario tests in parallel!
-    timeout: 9000
-    vars:
-      tox_envlist: full-parallel
-      run_tempest_cleanup: true
-      run_tempest_dry_cleanup: true
-      devstack_localrc:
-        USE_PYTHON3: True
-
-- job:
-    name: tempest-full-py3
-    parent: devstack-tempest
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking and py3.
-      Former names for this job where:
-        * legacy-tempest-dsvm-py35
-        * gate-tempest-dsvm-py35
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        GLANCE_USE_IMPORT_WORKFLOW: True
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        # NOTE(mriedem): Disable the cinder-backup service from
-        # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
-        # project template but the backup tests do not really involve other
-        # services so they should be run in some more cinder-specific job,
-        # especially because the tests fail at a high rate (see bugs 1483434,
-        # 1813217, 1745168)
-        c-bak: false
-
-- job:
-    name: tempest-integrated-networking
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This  job runs integration tests for networking. This is subset of
-      'tempest-full' job and run only Neutron and Nova related tests.
-      This is meant to be run on neutron gate only.
-    vars:
-      tox_envlist: integrated-network
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-compute
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for compute. This is
-      subset of 'tempest-full' job and run Nova, Neutron, Cinder (except backup tests)
-      and Glance related tests. This is meant to be run on Nova gate only.
-    vars:
-      tox_envlist: integrated-compute
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-placement
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for placement. This is
-      subset of 'tempest-full' job and run Nova and Neutron
-      related tests. This is meant to be run on Placement gate only.
-    vars:
-      tox_envlist: integrated-placement
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-storage
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for image & block storage. This is
-      subset of 'tempest-full' job and run Cinder, Glance, Swift and Nova
-      related tests. This is meant to be run on Cinder and Glance gate only.
-    vars:
-      tox_envlist: integrated-storage
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        GLANCE_USE_IMPORT_WORKFLOW: True
-
-- job:
-    name: tempest-integrated-object-storage
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for object storage. This is
-      subset of 'tempest-full' job and run Swift, Cinder and Glance
-      related tests. This is meant to be run on Swift gate only.
-    vars:
-      tox_envlist: integrated-object-storage
-      devstack_localrc:
-        # NOTE(gmann): swift is not ready on python3 yet and devstack
-        # install it on python2.7 only. But settting the USE_PYTHON3
-        # for future once swift is ready on py3.
-        USE_PYTHON3: true
-
-- job:
-    name: tempest-full-py3-ipv6
-    parent: devstack-tempest-ipv6
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking, IPv6 and py3.
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        c-bak: false
-
-- job:
-    name: tempest-multinode-full-base
-    parent: devstack-tempest
-    description: |
-      Base multinode integration test with Neutron networking and py27.
-      Former names for this job were:
-        * neutron-tempest-multinode-full
-        * legacy-tempest-dsvm-neutron-multinode-full
-        * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
-      This job includes two nodes, controller / tempest plus a subnode, but
-      it can be used with different topologies, as long as a controller node
-      and a tempest one exist.
-    timeout: 10800
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        FORCE_CONFIG_DRIVE: false
-        NOVA_ALLOW_MOVE_TO_SAME_HOST: false
-        LIVE_MIGRATION_AVAILABLE: true
-        USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-    group-vars:
-      peers:
-        devstack_localrc:
-          NOVA_ALLOW_MOVE_TO_SAME_HOST: false
-          LIVE_MIGRATION_AVAILABLE: true
-          USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-
-- job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-bionic
-    # This job runs on Bionic from stable/stein on.
-    branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-xenial
-    # This job runs on Xenial and this is for stable/pike, stable/queens
-    # and stable/rocky. This job is prepared to make sure all stable branches
-    # before stable/stein will keep running on xenial. This job can be
-    # removed once stable/rocky is EOL.
-    branches:
-      - stable/pike
-      - stable/queens
-      - stable/rocky
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
-    name: tempest-multinode-full-py3
-    parent: tempest-multinode-full
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: true
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: true
-
-- job:
-    name: tempest-full-py3-opensuse15
-    parent: tempest-full-py3
-    nodeset: devstack-single-node-opensuse-15
-    description: |
-      Base integration test with Neutron networking and py36 running
-      on openSUSE Leap 15.x
-    voting: false
-
-- job:
-    name: tempest-slow
-    parent: tempest-multinode-full
-    description: |
-      This multinode integration job will run all the tests tagged as slow.
-      It enables the lvm multibackend setup to cover few scenario tests.
-      This job will run only slow tests (API or Scenario) serially.
-
-      Former names for this job were:
-        * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
-        * tempest-scenario-multinode-lvm-multibackend
-    timeout: 10800
-    vars:
-      tox_envlist: slow-serial
-      devstack_localrc:
-        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_plugins:
-        neutron: https://opendev.org/openstack/neutron
-      devstack_services:
-        neutron-placement: true
-        neutron-qos: true
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
-      tempest_concurrency: 2
-    group-vars:
-      # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
-      # the controller and subnode prior to Rocky so we have to make sure the
-      # variable is set in both locations.
-      subnode:
-        devstack_localrc:
-          ENABLE_VOLUME_MULTIATTACH: true
-
-- job:
-    name: tempest-slow-py3
-    parent: tempest-slow
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        c-bak: false
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: true
-
-- job:
-    name: tempest-full-ussuri-py3
-    parent: tempest-full-py3
-    override-checkout: stable/ussuri
-
-- job:
-    name: tempest-full-train-py3
-    parent: tempest-full-py3
-    override-checkout: stable/train
-
-- job:
-    name: tempest-full-stein-py3
-    parent: tempest-full-py3
-    override-checkout: stable/stein
-
-- job:
-    name: tempest-tox-plugin-sanity-check
-    parent: tox
-    description: |
-      Run tempest plugin sanity check script using tox.
-    nodeset: ubuntu-bionic
-    vars:
-      tox_envlist: plugin-sanity-check
-    timeout: 5000
-
-- job:
-    name: tempest-cinder-v2-api
-    parent: devstack-tempest
-    branches:
-      - master
-    description: |
-      This job runs the cinder API test against v2 endpoint.
-    vars:
-      tox_envlist: all
-      tempest_test_regex: api.*volume
-      devstack_localrc:
-        TEMPEST_VOLUME_TYPE: volumev2
-
-- job:
-    name: tempest-full-test-account-py3
-    parent: tempest-full-py3
-    description: |
-      This job runs the full set of tempest tests using pre-provisioned
-      credentials instead of dynamic credentials and py3.
-      Former names for this job were:
-        - legacy-tempest-dsvm-full-test-accounts
-        - legacy-tempest-dsvm-neutron-full-test-accounts
-        - legacy-tempest-dsvm-identity-v3-test-accounts
-    vars:
-      devstack_localrc:
-        TEMPEST_USE_TEST_ACCOUNTS: True
-
-- job:
-    name: tempest-full-test-account-no-admin-py3
-    parent: tempest-full-test-account-py3
-    description: |
-      This job runs the full set of tempest tests using pre-provisioned
-      credentials and py3 without having an admin account.
-      Former name for this job was:
-        - legacy-tempest-dsvm-neutron-full-non-admin
-
-    vars:
-      devstack_localrc:
-        TEMPEST_HAS_ADMIN: False
-
-- job:
-    name: tempest-pg-full
-    parent: tempest-full
-    description: |
-      Base integration test with Neutron networking and PostgreSQL.
-      Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
-    vars:
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-        DATABASE_TYPE: postgresql
-        USE_PYTHON3: True
-
-- project-template:
-    name: integrated-gate-networking
-    description: |
-      Run the python3 Tempest network integration tests (Nova and Neutron related)
-      in check and gate for the neutron integrated gate. This is meant to be
-      run on neutron gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-networking
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-networking
-
-- project-template:
-    name: integrated-gate-compute
-    description: |
-      Run the python3 Tempest compute integration tests
-      (Nova, Neutron, Cinder and Glance related) in check and gate
-      for the Nova integrated gate. This is meant to be
-      run on Nova gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-compute
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-compute
-
-- project-template:
-    name: integrated-gate-placement
-    description: |
-      Run the python3 Tempest placement integration tests
-      (Nova and Neutron related) in check and gate
-      for the Placement integrated gate. This is meant to be
-      run on Placement gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-placement
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-placement
-
-- project-template:
-    name: integrated-gate-storage
-    description: |
-      Run the python3 Tempest image & block storage integration tests
-      (Cinder, Glance, Swift and Nova related) in check and gate
-      for the neutron integrated gate. This is meant to be
-      run on Cinder and Glance gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-storage
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-storage
-
-- project-template:
-    name: integrated-gate-object-storage
-    description: |
-      Run the python3 Tempest object storage integration tests
-      (Swift, Cinder and Glance related) in check and gate
-      for the swift integrated gate. This is meant to be
-      run on swift gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-object-storage
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-object-storage
-
-- project:
-    templates:
-      - check-requirements
-      - integrated-gate-py3
-      - openstack-cover-jobs
-      - openstack-python3-victoria-jobs
-      - publish-openstack-docs-pti
-      - release-notes-jobs-python3
-    check:
-      jobs:
-        - devstack-tempest:
-            files:
-              - ^playbooks/
-              - ^roles/
-              - ^.zuul.yaml$
-        - devstack-tempest-ipv6:
-            voting: false
-            files:
-              - ^playbooks/
-              - ^roles/
-              - ^.zuul.yaml$
-        - tempest-full-parallel:
-            # Define list of irrelevant files to use everywhere else
-            irrelevant-files: &tempest-irrelevant-files
-              - ^.*\.rst$
-              - ^doc/.*$
-              - ^etc/.*$
-              - ^releasenotes/.*$
-              - ^setup.cfg$
-              - ^tempest/hacking/.*$
-              - ^tempest/tests/.*$
-              - ^tools/.*$
-              - ^.coveragerc$
-              - ^.gitignore$
-              - ^.gitreview$
-              - ^.mailmap$
-        - tempest-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3-ipv6:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-ussuri-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-train-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-stein-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-multinode-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-tox-plugin-sanity-check:
-            irrelevant-files: &tempest-irrelevant-files-2
-              - ^.*\.rst$
-              - ^doc/.*$
-              - ^etc/.*$
-              - ^releasenotes/.*$
-              - ^setup.cfg$
-              - ^tempest/hacking/.*$
-              - ^tempest/tests/.*$
-              - ^.coveragerc$
-              - ^.gitignore$
-              - ^.gitreview$
-              - ^.mailmap$
-              # tools/ is not here since this relies on a script in tools/.
-        - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
-        - tempest-slow-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - nova-live-migration:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - devstack-plugin-ceph-tempest-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
-            irrelevant-files: *tempest-irrelevant-files
-        - grenade:
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario001-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario002-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario003-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario004-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr:
-            irrelevant-files: *tempest-irrelevant-files
-        - interop-tempest-consistency:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-test-account-py3:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-test-account-no-admin-py3:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - openstack-tox-bashate:
-            irrelevant-files: *tempest-irrelevant-files-2
-    gate:
-      jobs:
-        - tempest-slow-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - grenade:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
-        - devstack-plugin-ceph-tempest-py3:
-            irrelevant-files: *tempest-irrelevant-files
-    experimental:
-      jobs:
-        - tempest-cinder-v2-api:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-all:
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr-ha-multinode-full:
-            irrelevant-files: *tempest-irrelevant-files
-        - nova-tempest-v2-api:
-            irrelevant-files: *tempest-irrelevant-files
-        - cinder-tempest-lvm-multibackend:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-pg-full:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3-opensuse15:
-            irrelevant-files: *tempest-irrelevant-files
-    periodic-stable:
-      jobs:
-        - tempest-full-ussuri-py3
-        - tempest-full-train-py3
-        - tempest-full-stein-py3
-    periodic:
-      jobs:
-        - tempest-all
-        - tempest-full-oslo-master
diff --git a/REVIEWING.rst b/REVIEWING.rst
index e07e358..4c63aa0 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -160,13 +160,11 @@
 When to approve
 ---------------
 * It's OK to hold off on an approval until a subject matter expert reviews it.
-* Every patch needs two +2's before being approved.
-* However, a single Tempest core reviewer can approve patches without waiting
-  for another +2 in the following cases:
+* Every patch needs at least a single +2 before being approved. A single
+  Tempest core reviewer can approve patches, though they can always wait for
+  another +2 in any case. The following are cases where a single +2 can be
+  used without any issue:
 
-  * If a patch has already been approved but requires a trivial rebase to
-    merge, then there is no need to wait for a second +2, since the patch has
-    already had two +2's.
   * If any trivial patch set fixes one of the items below:
 
     * Documentation or code comment typo
@@ -187,7 +185,4 @@
     voting ``tempest-tox-plugin-sanity-check`` job) and unblock the
     tempest gate
 
-  Note that such a policy should be used judiciously, as we should strive to
-  have two +2's on each patch set, prior to approval.
-
 .. _example: https://review.opendev.org/#/c/611032/
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2a3edf4..b738fde 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -24,6 +24,7 @@
 
 import os
 import subprocess
+import sys
 
 # Build the plugin registry
 def build_plugin_registry(app):
@@ -31,16 +32,20 @@
         os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
     subprocess.call(['tools/generate-tempest-plugins-list.sh'], cwd=root_dir)
 
+def autodoc_skip_member_handler(app, what, name, obj, skip, options):
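+    # Within classes, skip any member whose name does not start with "test",
+    # so the generated test description pages list only the test methods.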
+    return skip or (what == "class" and not name.startswith("test"))
+
 def setup(app):
+    app.connect('autodoc-skip-member', autodoc_skip_member_handler)
     if os.getenv('GENERATE_TEMPEST_PLUGIN_LIST', 'true').lower() == 'true':
         app.connect('builder-inited', build_plugin_registry)
 
-
-
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
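+# Make the tempest package and its test modules importable so that autodoc
+# can build the test description pages added under doc/source/tests/.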
+sys.path.insert(0, os.path.abspath('../../tempest'))
+sys.path.insert(0, os.path.abspath('../../tempest/api'))
 
 # -- General configuration -----------------------------------------------------
 
@@ -81,7 +86,7 @@
 master_doc = 'index'
 
 # General information about the project.
-copyright = u'2013, OpenStack QA Team'
+copyright = '2013, OpenStack QA Team'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -201,6 +206,13 @@
 # (source start file, target name, title, author, documentclass
 # [howto/manual]).
 latex_documents = [
-    ('index', 'doc-tempest.tex', u'Tempest Testing Project',
-     u'OpenStack Foundation', 'manual'),
+    ('index', 'doc-tempest.tex', 'Tempest Testing Project',
+     'OpenStack Foundation', 'manual'),
 ]
+
+latex_use_xindy = False
+
+latex_elements = {
+    'maxlistdepth': 20,
+    'printindex': '\\footnotesize\\raggedright\\printindex'
+}
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index c43e420..c790c5f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -153,6 +153,11 @@
 
 Pre-Provisioned Credentials are also known as accounts.yaml or accounts file.
 
+Keystone Scopes & Roles Support in Tempest
+""""""""""""""""""""""""""""""""""""""""""
+For details on scopes and roles support in Tempest,
+please refer to :doc:`this document <keystone_scopes_and_roles_support>`.
+
 Compute
 -------
 
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
index 9c79a1f..139f0b7 100644
--- a/doc/source/contributor/contributing.rst
+++ b/doc/source/contributor/contributing.rst
@@ -13,7 +13,7 @@
 
 Communication
 ~~~~~~~~~~~~~
-* IRC channel ``#openstack-qa`` at FreeNode
+* IRC channel ``#openstack-qa`` at OFTC
 * Mailing list (prefix subjects with ``[qa]`` for faster responses)
   http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
 
@@ -43,10 +43,9 @@
 
 Getting Your Patch Merged
 ~~~~~~~~~~~~~~~~~~~~~~~~~
-All changes proposed to the Tempest require two ``Code-Review +2`` votes from
-Tempest core reviewers before one of the core reviewers can approve the patch by
-giving ``Workflow +1`` vote. More detailed guidelines for reviewers are available
-at :doc:`../REVIEWING`.
+All changes proposed to Tempest require a single ``Code-Review +2`` vote from
+a Tempest core reviewer, who can then approve the patch by giving a
+``Workflow +1`` vote. More detailed guidelines for reviewers are available
+at :doc:`../REVIEWING`.
 
 Project Team Lead Duties
 ~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/data/tempest-blacklisted-plugins-registry.header b/doc/source/data/tempest-non-active-plugins-registry.header
similarity index 67%
rename from doc/source/data/tempest-blacklisted-plugins-registry.header
rename to doc/source/data/tempest-non-active-plugins-registry.header
index 6b6af11..06d8eaa 100644
--- a/doc/source/data/tempest-blacklisted-plugins-registry.header
+++ b/doc/source/data/tempest-non-active-plugins-registry.header
@@ -1,7 +1,7 @@
-Blacklisted Plugins
+Non Active Plugins
 ===================
 
 List of Tempest plugin projects that are stale or unmaintained for a long
-time (6 months or more). They can be moved out of blacklist state once one
+time (6 months or more). They can be moved out of the non-active list once one
 of the relevant patches gets merged:
 https://review.opendev.org/#/q/topic:tempest-sanity-gate+%28status:open%29
diff --git a/doc/source/index.rst b/doc/source/index.rst
index d4dc166..2f29cf2 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -56,6 +56,13 @@
 
    supported_version
 
+Description of Tests
+--------------------
+.. toctree::
+   :maxdepth: 2
+
+   tests/modules
+
 For Contributors
 ================
 
@@ -98,6 +105,14 @@
 
    tempest_and_plugins_compatible_version_policy
 
+Keystone Scopes & Roles Support in Tempest
+------------------------------------------
+
+.. toctree::
+   :maxdepth: 2
+
+   keystone_scopes_and_roles_support
+
 Stable Branch Support Policy
 ----------------------------
 
diff --git a/doc/source/keystone_scopes_and_roles_support.rst b/doc/source/keystone_scopes_and_roles_support.rst
new file mode 100644
index 0000000..f446f8c
--- /dev/null
+++ b/doc/source/keystone_scopes_and_roles_support.rst
@@ -0,0 +1,286 @@
+Keystone Scopes & Roles Support in Tempest
+==========================================
+
+OpenStack Keystone supports different scopes in tokens; refer to the
+`Keystone doc <https://docs.openstack.org/keystone/latest/admin/tokens-overview.html#authorization-scopes>`_.
+Along with the scopes, Keystone supports default roles, one of which
+is a reader role; for details refer to
+`this keystone document <https://docs.openstack.org/keystone/latest/admin/service-api-protection.html>`_.
+
+Tempest supports credentials with these scopes and roles, which can be
+used to test APIs under different scopes and roles.
+
+Dynamic Credentials
+-------------------
+
+Dynamic credentials support all of the below personas and allow you
+to generate credentials tailored to a specific persona that you can
+use in your test.
+
+Domain scoped personas:
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+  #. Domain Admin: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['domain_admin']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_d_admin_client = (
+                     cls.os_domain_admin.availability_zone_client)
+
+  #. Domain Member: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['domain_member']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_d_member_client = (
+                     cls.os_domain_member.availability_zone_client)
+
+  #. Domain Reader: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['domain_reader']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_d_reader_client = (
+                     cls.os_domain_reader.availability_zone_client)
+
+  #. Domain other roles: This is supported and can be requested and used from
+     the test as below:
+
+     You need to use ``domain`` as the prefix in the credentials type, and
+     based on that, Tempest will create the test users under the 'domain'
+     scope.
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = [['domain_my_role1', 'my_own_role1', 'admin'],
+                            ['domain_my_role2', 'my_own_role2']]
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_d_role1_client = (
+                     cls.os_domain_my_role1.availability_zone_client)
+                 cls.az_d_role2_client = (
+                     cls.os_domain_my_role2.availability_zone_client)
+
+System scoped personas:
+^^^^^^^^^^^^^^^^^^^^^^^
+
+  #. System Admin: This is supported and can be requested and used from the
+     test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['system_admin']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_s_admin_client = (
+                     cls.os_system_admin.availability_zone_client)
+
+  #. System Member: This is supported and can be requested and used from the
+     test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['system_member']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_s_member_client = (
+                     cls.os_system_member.availability_zone_client)
+
+  #. System Reader: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['system_reader']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_s_reader_client = (
+                     cls.os_system_reader.availability_zone_client)
+
+  #. System other roles: This is supported and can be requested and used from
+     the test as below:
+
+     You need to use ``system`` as the prefix in the credentials type, and
+     based on that, Tempest will create the test users under the 'system'
+     scope.
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = [['system_my_role1', 'my_own_role1', 'admin'],
+                            ['system_my_role2', 'my_own_role2']]
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_s_role1_client = (
+                     cls.os_system_my_role1.availability_zone_client)
+                 cls.az_s_role2_client = (
+                     cls.os_system_my_role2.availability_zone_client)
+
+Project scoped personas:
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+  #. Project Admin: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_admin']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_admin_client = (
+                     cls.os_project_admin.availability_zone_client)
+
+  #. Project Member: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_member']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_member_client = (
+                     cls.os_project_member.availability_zone_client)
+
+  #. Project Reader: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_reader']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_reader_client = (
+                     cls.os_project_reader.availability_zone_client)
+
+  #. Project alternate Admin: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_alt_admin']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_alt_admin_client = (
+                     cls.os_project_alt_admin.availability_zone_client)
+
+  #. Project alternate Member: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_alt_member']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_alt_member_client = (
+                     cls.os_project_alt_member.availability_zone_client)
+
+  #. Project alternate Reader: This is supported and can be requested and used from
+     the test as below:
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = ['project_alt_reader']
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_p_alt_reader_client = (
+                     cls.os_project_alt_reader.availability_zone_client)
+
+  #. Project other roles: This is supported and can be requested and used from
+     the test as below:
+
+     You need to use ``project`` as the prefix in the credentials type, and
+     based on that, Tempest will create the test users under the 'project'
+     scope.
+
+     .. code-block:: python
+
+         class TestDummy(base.DummyBaseTest):
+
+             credentials = [['project_my_role1', 'my_own_role1', 'admin'],
+                            ['project_my_role2', 'my_own_role2']]
+
+             @classmethod
+             def setup_clients(cls):
+                 super(TestDummy, cls).setup_clients()
+                 cls.az_role1_client = (
+                     cls.os_project_my_role1.availability_zone_client)
+                 cls.az_role2_client = (
+                     cls.os_project_my_role2.availability_zone_client)
+
+Pre-Provisioned Credentials
+---------------------------
+
+Pre-Provisioned credentials support the below set of personas and can be
+used in tests in the same way as shown above in the ``Dynamic Credentials``
+section.
+
+* Domain Admin
+* Domain Member
+* Domain Reader
+* System Admin
+* System Member
+* System Reader
+* Project Admin
+* Project Member
+* Project Reader
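+
+For example, the System Reader persona above maps to an entry in the
+accounts file like the following (taken from
+``etc/rbac-persona-accounts.yaml.sample`` added in this change; the
+username and password values are samples only):
+
+.. code-block:: yaml
+
+    - user_domain_name: Default
+      password: password
+      username: tempest-system-reader-1
+      roles:
+        - reader
+      system: all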
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 5bc0eac..0b80b72 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -126,16 +126,16 @@
 
 .. code-block:: python
 
-    class BaseTestCase1(api_version_utils.BaseMicroversionTest):
+   class BaseTestCase1(api_version_utils.BaseMicroversionTest):
 
-        [..]
-    @classmethod
-    def skip_checks(cls):
-        super(BaseTestCase1, cls).skip_checks()
-        api_version_utils.check_skip_with_microversion(cls.min_microversion,
-                                                       cls.max_microversion,
-                                                       CONF.compute.min_microversion,
-                                                       CONF.compute.max_microversion)
+       [..]
+       @classmethod
+       def skip_checks(cls):
+           super(BaseTestCase1, cls).skip_checks()
+           api_version_utils.check_skip_with_microversion(cls.min_microversion,
+                                                          cls.max_microversion,
+                                                          CONF.compute.min_microversion,
+                                                          CONF.compute.max_microversion)
 
 Skip logic can be added in tests base class or any specific test class depends on
 tests class structure.
@@ -302,6 +302,10 @@
 
   .. _2.2: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id2
 
+  * `2.3`_
+
+  .. _2.3: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-kilo
+
   * `2.6`_
 
   .. _2.6: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id5
@@ -352,15 +356,15 @@
 
   * `2.37`_
 
-  .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id34
+  .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id35
 
   * `2.39`_
 
-  .. _2.39: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id35
+  .. _2.39: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id36
 
   * `2.41`_
 
-  .. _2.41: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id37
+  .. _2.41: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id38
 
   * `2.42`_
 
@@ -368,15 +372,15 @@
 
   * `2.47`_
 
-  .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42
+  .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
 
   * `2.48`_
 
-  .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
+  .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
 
   * `2.49`_
 
-  .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
+  .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id45
 
   * `2.53`_
 
@@ -384,15 +388,15 @@
 
   * `2.54`_
 
-  .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id49
+  .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
 
   * `2.55`_
 
-  .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
+  .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id51
 
   * `2.57`_
 
-  .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id52
+  .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id53
 
   * `2.59`_
 
@@ -404,24 +408,28 @@
 
   * `2.61`_
 
-  .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id55
+  .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id56
 
   * `2.63`_
 
-  .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id57
+  .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id58
 
   * `2.70`_
 
-  .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id63
+  .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
 
   * `2.71`_
 
-  .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
+  .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id65
 
   * `2.73`_
 
   .. _2.73: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id66
 
+  * `2.79`_
+
+  .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train 
+
 * Volume
 
   * `3.3`_
@@ -455,3 +463,7 @@
   * `3.20`_
 
   .. _3.20:  https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id19
+
+  * `3.55`_
+
+  .. _3.55:  https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#maximum-in-rocky
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index e51b90b..2eaf72f 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -113,7 +113,7 @@
    There is also the option to use `stestr`_ directly. For example, from
    the workspace dir run::
 
-    $ stestr run --black-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
+    $ stestr run --exclude-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
 
    will run the same set of tests as the default gate jobs. Or you can
    use `unittest`_ compatible test runners such as `stestr`_, `pytest`_ etc.
diff --git a/doc/source/plugins/plugin.rst b/doc/source/plugins/plugin.rst
index ab1b0b1..b1fd6f8 100644
--- a/doc/source/plugins/plugin.rst
+++ b/doc/source/plugins/plugin.rst
@@ -31,6 +31,7 @@
 * tempest.common.credentials_factory
 * tempest.clients
 * tempest.test
+* tempest.scenario.manager
 
 If there is an interface from Tempest that you need to rely on in your plugin
 which is not listed above, it likely needs to be migrated to tempest.lib. In
@@ -268,12 +269,12 @@
 
    class MyAPIClient(rest_client.RestClient):
 
-    def __init__(self, auth_provider, service, region,
-                 my_arg, my_arg2=True, **kwargs):
-        super(MyAPIClient, self).__init__(
-            auth_provider, service, region, **kwargs)
-        self.my_arg = my_arg
-        self.my_args2 = my_arg
+       def __init__(self, auth_provider, service, region,
+                    my_arg, my_arg2=True, **kwargs):
+           super(MyAPIClient, self).__init__(
+               auth_provider, service, region, **kwargs)
+           self.my_arg = my_arg
+           self.my_arg2 = my_arg2
 
 Finally the service client should be structured in a python module, so that all
 service client classes are importable from it. Each major API version should
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
index 87e3ad1..9c2d1ed 100644
--- a/doc/source/stable_branch_support_policy.rst
+++ b/doc/source/stable_branch_support_policy.rst
@@ -20,7 +20,7 @@
 testing branches in these phases, it's possible that we'll introduce changes to
 Tempest on master which will break support on *Extended Maintenance* phase
 branches. When this happens the expectation for those branches is to either
-switch to running Tempest from a tag with support for the branch, or blacklist
+switch to running Tempest from a tag with support for the branch, or exclude
 a newly introduced test (if that is the cause of the issue). Tempest will not
 be creating stable branches to support *Extended Maintenance* phase branches, as
 the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 388b4cd..4ca7f0d 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,9 +9,9 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* Victoria
 * Ussuri
 * Train
-* Stein
 
 For older OpenStack Release:
 
@@ -34,3 +34,4 @@
 
 * Python 3.6
 * Python 3.7
+* Python 3.8
diff --git a/doc/source/tests/modules.rst b/doc/source/tests/modules.rst
new file mode 100644
index 0000000..026a7a5
--- /dev/null
+++ b/doc/source/tests/modules.rst
@@ -0,0 +1,21 @@
+Description of Tests
+====================
+
+OpenStack Services Integration Tests
+------------------------------------
+.. toctree::
+   :maxdepth: 2
+
+   scenario/modules
+
+OpenStack Services API Tests
+----------------------------
+.. toctree::
+   :maxdepth: 2
+
+   compute/modules
+   identity/modules
+   image/modules
+   network/modules
+   object_storage/modules
+   volume/modules
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 0a29b7b..34df089 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -76,54 +76,54 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-    @classmethod
-    def skip_checks(cls):
-        """This section is used to evaluate config early and skip all test
-           methods based on these checks
-        """
-        super(TestExampleCase, cls).skip_checks()
-        if not CONF.section.foo
-            cls.skip('A helpful message')
+      @classmethod
+      def skip_checks(cls):
+          """This section is used to evaluate config early and skip all test
+             methods based on these checks
+          """
+          super(TestExampleCase, cls).skip_checks()
+          if not CONF.section.foo:
+              cls.skip('A helpful message')
 
-    @classmethod
-    def setup_credentials(cls):
-        """This section is used to do any manual credential allocation and also
-           in the case of dynamic credentials to override the default network
-           resource creation/auto allocation
-        """
-        # This call is used to tell the credential allocator to not create any
-        # network resources for this test case. It also enables selective
-        # creation of other neutron resources. NOTE: it must go before the
-        # super call
-        cls.set_network_resources()
-        super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          """This section is used to do any manual credential allocation and also
+             in the case of dynamic credentials to override the default network
+             resource creation/auto allocation
+          """
+          # This call is used to tell the credential allocator to not create any
+          # network resources for this test case. It also enables selective
+          # creation of other neutron resources. NOTE: it must go before the
+          # super call
+          cls.set_network_resources()
+          super(TestExampleCase, cls).setup_credentials()
 
-    @classmethod
-    def setup_clients(cls):
-        """This section is used to setup client aliases from the manager object
-           or to initialize any additional clients. Except in a few very
-           specific situations you should not need to use this.
-        """
-        super(TestExampleCase, cls).setup_clients()
-        cls.servers_client = cls.os_primary.servers_client
+      @classmethod
+      def setup_clients(cls):
+          """This section is used to setup client aliases from the manager object
+             or to initialize any additional clients. Except in a few very
+             specific situations you should not need to use this.
+          """
+          super(TestExampleCase, cls).setup_clients()
+          cls.servers_client = cls.os_primary.servers_client
 
-    @classmethod
-    def resource_setup(cls):
-        """This section is used to create any resources or objects which are
-           going to be used and shared by **all** test methods in the
-           TestCase. Note then anything created in this section must also be
-           destroyed in the corresponding resource_cleanup() method (which will
-           be run during tearDownClass())
-        """
-        super(TestExampleCase, cls).resource_setup()
-        cls.shared_server = cls.servers_client.create_server(...)
-        cls.addClassResourceCleanup(waiters.wait_for_server_termination,
-                                    cls.servers_client,
-                                    cls.shared_server['id'])
-        cls.addClassResourceCleanup(
-            test_utils.call_and_ignore_notfound_exc(
-                cls.servers_client.delete_server,
-                cls.shared_server['id']))
+      @classmethod
+      def resource_setup(cls):
+          """This section is used to create any resources or objects which are
+             going to be used and shared by **all** test methods in the
+             TestCase. Note that anything created in this section must also be
+             destroyed in the corresponding resource_cleanup() method (which will
+             be run during tearDownClass())
+          """
+          super(TestExampleCase, cls).resource_setup()
+          cls.shared_server = cls.servers_client.create_server(...)
+          cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+                                      cls.servers_client,
+                                      cls.shared_server['id'])
+          cls.addClassResourceCleanup(
+              test_utils.call_and_ignore_notfound_exc,
+              cls.servers_client.delete_server,
+              cls.shared_server['id'])
 
 .. _credentials:
 
@@ -150,9 +150,9 @@
 
         credentials = ['primary', 'admin']
 
-    @classmethod
-    def skip_checks(cls):
-    ...
+        @classmethod
+        def skip_checks(cls):
+            ...
 
 In this example the ``TestExampleAdmin`` TestCase will allocate 2 sets of
 credentials, one regular user and one admin user. The corresponding manager
@@ -225,10 +225,10 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-  @classmethod
-  def setup_credentials(cls):
-      cls.set_network_resources(network=True, subnet=True, router=False)
-      super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          cls.set_network_resources(network=True, subnet=True, router=False)
+          super(TestExampleCase, cls).setup_credentials()
 
 There are 2 quirks with the usage here. First for the set_network_resources
 function to work properly it **must be called before super()**. This is so
@@ -242,10 +242,10 @@
 
   class TestExampleCase(test.BaseTestCase):
 
-  @classmethod
-  def setup_credentials(cls):
-      cls.set_network_resources()
-      super(TestExampleCase, cls).setup_credentials()
+      @classmethod
+      def setup_credentials(cls):
+          cls.set_network_resources()
+          super(TestExampleCase, cls).setup_credentials()
 
 This will not allocate any networking resources, because all of the
 arguments default to False.
@@ -282,8 +282,8 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      self.os_primary.servers_client.create_server(...)
+      def test_example_create_server(self):
+          self.os_primary.servers_client.create_server(...)
 
 is all you need to do. As described previously, in the above example the
 ``self.os_primary`` is created automatically because the base test class sets the
@@ -305,8 +305,8 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      credentials = self.os_primary.credentials
+      def test_example_create_server(self):
+          credentials = self.os_primary.credentials
 
 The credentials object provides access to all of the credential information you
 would need to make API requests. For example, building off the previous
@@ -316,9 +316,9 @@
 
 
   class TestExampleCase(test.BaseTestCase):
-    def test_example_create_server(self):
-      credentials = self.os_primary.credentials
-      username = credentials.username
-      user_id = credentials.user_id
-      password = credentials.password
-      tenant_id = credentials.tenant_id
+      def test_example_create_server(self):
+          credentials = self.os_primary.credentials
+          username = credentials.username
+          user_id = credentials.user_id
+          password = credentials.password
+          tenant_id = credentials.tenant_id
diff --git a/etc/whitelist.yaml b/etc/allow-list.yaml
similarity index 100%
rename from etc/whitelist.yaml
rename to etc/allow-list.yaml
diff --git a/etc/rbac-persona-accounts.yaml.sample b/etc/rbac-persona-accounts.yaml.sample
new file mode 100644
index 0000000..0b59538
--- /dev/null
+++ b/etc/rbac-persona-accounts.yaml.sample
@@ -0,0 +1,108 @@
+- user_domain_name: Default
+  password: password
+  roles:
+    - admin
+  username: tempest-system-admin-1
+  system: all
+- user_domain_name: Default
+  password: password
+  username: tempest-system-member-1
+  roles:
+    - member
+  system: all
+- user_domain_name: Default
+  password: password
+  username: tempest-system-reader-1
+  roles:
+    - reader
+  system: all
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-admin-1
+  roles:
+    - admin
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-member-1
+  roles:
+    - member
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-reader-1
+  roles:
+    - reader
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-admin-1
+  roles:
+    - admin
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-member-1
+  roles:
+    - member
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-reader-1
+  roles:
+    - reader
+- user_domain_name: Default
+  password: password
+  username: tempest-system-admin-2
+  roles:
+    - admin
+  system: all
+- user_domain_name: Default
+  password: password
+  username: tempest-system-member-2
+  roles:
+    - member
+  system: all
+- user_domain_name: Default
+  password: password
+  system: all
+  username: tempest-system-reader-2
+  roles:
+    - reader
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-admin-2
+  roles:
+    - admin
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-member-2
+  roles:
+    - member
+- user_domain_name: Default
+  password: password
+  domain_name: tempest-test-domain
+  username: tempest-domain-reader-2
+  roles:
+    - reader
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-admin-2
+  roles:
+    - admin
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-member-2
+  roles:
+    - member
+- user_domain_name: Default
+  password: password
+  project_name: tempest-test-project
+  username: tempest-project-reader-2
+  roles:
+    - reader
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index 5f72345..4788362 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -7,11 +7,6 @@
 
 # We run tests only on one node, regardless how many nodes are in the system
 - hosts: tempest
-  environment:
-    # This enviroment variable is used by the optional tempest-gabbi
-    # job provided by the gabbi-tempest plugin. It can be safely ignored
-    # if that plugin is not being used.
-    GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
   roles:
     - setup-tempest-run-dir
     - setup-tempest-data-dir
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 7ee7411..3b969f2 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -7,11 +7,6 @@
 
 # We run tests only on one node, regardless how many nodes are in the system
 - hosts: tempest
-  environment:
-    # This enviroment variable is used by the optional tempest-gabbi
-    # job provided by the gabbi-tempest plugin. It can be safely ignored
-    # if that plugin is not being used.
-    GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
   tasks:
     - name: Setup Tempest Run Directory
       include_role:
@@ -30,9 +25,9 @@
         name: tempest-cleanup
       vars:
         init_saved_state: true
-      when:
-        - run_tempest_dry_cleanup is defined
-        - run_tempest_cleanup is defined
+      when: (run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool) or
+            (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
+            (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
 
     - name: Run Tempest
       include_role:
@@ -43,10 +38,9 @@
         name: tempest-cleanup
       vars:
         dry_run: true
-      when:
-        - run_tempest_dry_cleanup is defined
+      when: run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool
 
     - name: Run tempest cleanup
       include_role:
         name: tempest-cleanup
-      when: run_tempest_cleanup is defined
+      when: run_tempest_cleanup is defined and run_tempest_cleanup | bool
diff --git a/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml b/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml
new file mode 100644
index 0000000..33df7c4
--- /dev/null
+++ b/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    The following tempest clients for keystone v3 OS_FEDERATION API were
+    implemented in this release
+
+    * identity_providers
+    * protocols
+    * mappings
+    * service_providers
diff --git a/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml b/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml
new file mode 100644
index 0000000..089569e
--- /dev/null
+++ b/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml
@@ -0,0 +1,13 @@
+---
+deprecations:
+  - |
+    In this release the following tempest arguments are deprecated and
+    replaced by new ones which are functionally equivalent:
+
+     * --black-regex is replaced by --exclude-regex
+     * --blacklist-file is replaced by --exclude-list
+     * --whitelist-file is replaced by --include-list
+
+    For now Tempest supports both the new and the old arguments in order to
+    make the transition smoother for all consumers. However, this is only
+    temporary and the old options will be removed soon.
diff --git a/releasenotes/notes/Remove-deprecated-scenario.img_dir-option-da626e6153181e16.yaml b/releasenotes/notes/Remove-deprecated-scenario.img_dir-option-da626e6153181e16.yaml
new file mode 100644
index 0000000..2514e48
--- /dev/null
+++ b/releasenotes/notes/Remove-deprecated-scenario.img_dir-option-da626e6153181e16.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+  - |
+    The deprecated img_dir scenario option has been removed.
+    Starting with the Tempest 25.0.0 release, CONF.scenario.img_file needs a
+    full path for the image. Until this release the old behavior was
+    maintained and kept working, however from now on users need to specify
+    the full path in the CONF.scenario.img_file config option.
diff --git a/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml b/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml
new file mode 100644
index 0000000..822df7d
--- /dev/null
+++ b/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    In this release, tempest/manager.py has been removed after more than
+    4 years of deprecation.
diff --git a/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml b/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml
new file mode 100644
index 0000000..fb84d25
--- /dev/null
+++ b/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    The test_reboot_server_soft test, which had been skipped for more than
+    6 years, has been removed. Take into account that the minimum scenario
+    test uses soft reboot and the nova functional tests also cover reboot.
diff --git a/releasenotes/notes/Stabilize-scenario-manager-adf36d21b08e31a4.yaml b/releasenotes/notes/Stabilize-scenario-manager-adf36d21b08e31a4.yaml
new file mode 100644
index 0000000..8df5f3c
--- /dev/null
+++ b/releasenotes/notes/Stabilize-scenario-manager-adf36d21b08e31a4.yaml
@@ -0,0 +1,31 @@
+---
+prelude: >
+    Tempest.scenario.manager is now a stable interface for Tempest plugins.
+features:
+  - |
+    In this release, we made tempest/scenario/manager.py a stable interface
+    ready to be consumed by all tempest plugins. The effort was tracked in
+    the following etherpad [1], and the related review can be listed via [2]:
+
+    * [1] https://etherpad.opendev.org/p/tempest-scenario-manager
+    * [2] https://review.opendev.org/#/q/topic:bp/tempest-scenario-manager-stable(status:open+OR+status:merged)
+
+    Some time ago, tempest/scenario/manager.py got copied to most of the plugins
+    and therefore it diverged; every plugin's copy had slight differences.
+    In this release, we pushed changes to unify the manager's methods and
+    improved their APIs to make them easier to consume (a short consumption
+    sketch follows the list below):
+
+    * we added implementations of methods that were often used in plugins'
+      manager.py however were not implemented in our manager
+    * we improved APIs by, e.g., adding a kwargs argument so that the consumers
+      are more in control of the data that is passed to tempest clients
+    * we modified the logic of a few methods so that it complies with the
+      plugins' manager versions, in order to prepare for a situation where the
+      plugins can reuse the Tempest manager as much as possible rather than
+      keeping their own copies
+    * we made methods consistent w.r.t. names and parameters
+    * we split the lengthy methods to have more readable code
+    * previously private methods which had a potential to be reused were
+      made public so that it's clear they are expected to be used in tempest
+      plugins
+    * missing docstrings have been added
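+
+    A minimal, hedged sketch of a plugin test consuming the now-stable
+    manager; the class and method names used here are illustrative
+    assumptions, not a definitive listing of the stable interface::
+
+        from tempest.scenario import manager
+
+        class TestMyPluginScenario(manager.ScenarioTest):
+
+            def test_server_console(self):
+                server = self.create_server(wait_until='ACTIVE')
+                # log_console_output() is one of the previously private
+                # helpers that is now part of the public manager surface
+                self.log_console_output(servers=[server])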
diff --git a/releasenotes/notes/add-alt-project-dynamic-creds-1a3bc543e65d9433.yaml b/releasenotes/notes/add-alt-project-dynamic-creds-1a3bc543e65d9433.yaml
new file mode 100644
index 0000000..de81b2b
--- /dev/null
+++ b/releasenotes/notes/add-alt-project-dynamic-creds-1a3bc543e65d9433.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Add alternate project admin, member and reader roles for dynamic credentials.
diff --git a/releasenotes/notes/add-assisted-volume-snapshot-client-737f5cb35d58c1b6.yaml b/releasenotes/notes/add-assisted-volume-snapshot-client-737f5cb35d58c1b6.yaml
new file mode 100644
index 0000000..5498688
--- /dev/null
+++ b/releasenotes/notes/add-assisted-volume-snapshot-client-737f5cb35d58c1b6.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Add a new client to handle requests to create and delete assisted volume snapshots.
diff --git a/releasenotes/notes/add-compute-feature-ide-bus-b63802502c378083.yaml b/releasenotes/notes/add-compute-feature-ide-bus-b63802502c378083.yaml
new file mode 100644
index 0000000..43a0e8c
--- /dev/null
+++ b/releasenotes/notes/add-compute-feature-ide-bus-b63802502c378083.yaml
@@ -0,0 +1,10 @@
+---
+other:
+  - |
+    A new ``[compute-feature-enabled]ide_bus`` config option has been
+    introduced to indicate if the environment supports attaching disks to an
+    instance using an ``IDE`` bus.
+
+    This currently defaults to ``True`` but should be set to ``False`` when
+    using the libvirt OpenStack Nova virt driver *and* the ``q35`` machine type
+    as support for this bus is no longer provided.
diff --git a/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
new file mode 100644
index 0000000..121e060
--- /dev/null
+++ b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add a new config option ``[compute-feature-enabled] shelve_migrate``
+    which enables tests for environments that support cold migration of
+    qcow2 unshelved instances.
diff --git a/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml b/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml
new file mode 100644
index 0000000..1840c10
--- /dev/null
+++ b/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml
@@ -0,0 +1,13 @@
+---
+features:
+  - |
+    Added methods to the identity v3 roles client to support:
+
+    - PUT /v3/system/users/{user}/roles/{role}
+    - GET /v3/system/users/{user}/roles
+    - GET /v3/system/users/{user}/roles/{role}
+    - DELETE /v3/system/users/{user}/roles/{role}
+    - PUT /v3/system/groups/{group}/roles/{role}
+    - GET /v3/system/groups/{group}/roles
+    - GET /v3/system/groups/{group}/roles/{role}
+    - DELETE /v3/system/groups/{group}/roles/{role}
diff --git a/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml b/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml
new file mode 100644
index 0000000..159bbe8
--- /dev/null
+++ b/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - A new config option in the validation section, image_alt_ssh_user,
+    to specify the user name used to authenticate to an alternative
+    instance (instance using image_ref_alt) in tests. By default this
+    is set to root.
+  - A new config option in the validation section, image_alt_ssh_password,
+    to specify the password used to authenticate to an alternative
+    instance (instance using image_ref_alt) in tests. By default this
+    is set to password.
diff --git a/releasenotes/notes/add-keystone-ep-clients-eeefd0904fbbe151.yaml b/releasenotes/notes/add-keystone-ep-clients-eeefd0904fbbe151.yaml
new file mode 100644
index 0000000..2b407ae
--- /dev/null
+++ b/releasenotes/notes/add-keystone-ep-clients-eeefd0904fbbe151.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added missing client methods for keystone's OS-ENDPOINT-POLICY and
+    OS-EP-FILTER APIs.
diff --git a/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml b/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml
new file mode 100644
index 0000000..ab8d748
--- /dev/null
+++ b/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added missing clients and tests for keystone's v3 EC2 API which already
+    existed for keystone v2.
diff --git a/releasenotes/notes/add-placement-usage-client-method-8b6015cbd8a5e0f6.yaml b/releasenotes/notes/add-placement-usage-client-method-8b6015cbd8a5e0f6.yaml
new file mode 100644
index 0000000..d31a33c
--- /dev/null
+++ b/releasenotes/notes/add-placement-usage-client-method-8b6015cbd8a5e0f6.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Add a ``placement`` API usage method for evaluating resource class
+    utilization of a resource provider. The following API call is now
+    available to Tempest in the resource_providers_client:
+
+    * GET /resource_providers/{uuid}/usages
diff --git a/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml b/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml
new file mode 100644
index 0000000..2cd5af6
--- /dev/null
+++ b/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add show type API to v3 types_client library.
+
+    * default_volume_type
diff --git a/releasenotes/notes/add-volume-transfers-v3.55-73f75073ad2c4091.yaml b/releasenotes/notes/add-volume-transfers-v3.55-73f75073ad2c4091.yaml
new file mode 100644
index 0000000..c35dd67
--- /dev/null
+++ b/releasenotes/notes/add-volume-transfers-v3.55-73f75073ad2c4091.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Add a ``TransfersV355Client`` to the volume v3 ``transfer_client`` library
+    supporting create, list, show, delete, and accept operations for the `new
+    Volume Transfers API
+    <https://docs.openstack.org/api-ref/block-storage/v3/#volume-transfers-volume-transfers-3-55-or-later>`_
+    of the Block Storage API v3.  The min_microversion of this API is 3.55.
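+
+    A hedged usage sketch; the manager attribute and method names below
+    mirror the existing volume transfers client and are assumptions, not the
+    definitive API::
+
+        # ``volume`` is assumed to be an existing volume dict
+        client = cls.os_primary.volume_transfers_v355_client
+        transfer = client.create_volume_transfer(
+            volume_id=volume['id'])['transfer']
+        client.accept_volume_transfer(transfer['id'],
+                                      auth_key=transfer['auth_key'])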
diff --git a/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
new file mode 100644
index 0000000..8e42e85
--- /dev/null
+++ b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added associate_floating_ip() and dissociate_floating_ip() methods
+    to the scenario manager.
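+
+    A hedged usage sketch inside a scenario test; the exact signatures are
+    assumptions based on the method names above::
+
+        floating_ip = self.create_floating_ip(server)
+        self.associate_floating_ip(floating_ip, server)
+        # ... exercise connectivity over the floating IP ...
+        self.dissociate_floating_ip(floating_ip)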
diff --git a/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml b/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml
new file mode 100644
index 0000000..e53411d
--- /dev/null
+++ b/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Added the public interface create_loginable_secgroup_rule().
+    Since this interface is meant to be used by tempest plugins,
+    it does not need to remain a private API.
diff --git a/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml b/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml
new file mode 100644
index 0000000..3354f65
--- /dev/null
+++ b/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Added the public interface create_security_group_rule().
+    Since this interface is meant to be used by tempest plugins,
+    it does not need to remain a private API.
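+
+    A hedged sketch of how a scenario test might use this helper together
+    with create_loginable_secgroup_rule() from the note above; the argument
+    names are assumptions, not the definitive signatures::
+
+        # ``secgroup`` is assumed to be an existing security group dict
+        self.create_loginable_secgroup_rule(secgroup_id=secgroup['id'])
+        self.create_security_group_rule(secgroup=secgroup, protocol='tcp',
+                                        direction='ingress',
+                                        port_range_min=8080,
+                                        port_range_max=8080)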
diff --git a/releasenotes/notes/deprecate-image-v1-service-clients-d12ed42210bb76b5.yaml b/releasenotes/notes/deprecate-image-v1-service-clients-d12ed42210bb76b5.yaml
new file mode 100644
index 0000000..4a22f8e
--- /dev/null
+++ b/releasenotes/notes/deprecate-image-v1-service-clients-d12ed42210bb76b5.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+  - |
+    Tempest service clients for the image v1 APIs (the tempest.lib.services.image.v1
+    module) are deprecated and will be removed once Tempest stops supporting the
+    stable Ussuri release, which is the last release where the Image v1 APIs are
+    present in glance.
diff --git a/releasenotes/notes/deprecate-volume-v2-service-clients-ff8a2a7be1797eb5.yaml b/releasenotes/notes/deprecate-volume-v2-service-clients-ff8a2a7be1797eb5.yaml
new file mode 100644
index 0000000..de05679
--- /dev/null
+++ b/releasenotes/notes/deprecate-volume-v2-service-clients-ff8a2a7be1797eb5.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+  - |
+    Tempest service clients for the volume v2 APIs (the tempest.lib.services.volume.v2
+    module) are deprecated and will be removed once Tempest stops supporting the
+    stable Wallaby release, which is the last release where the volume v2 APIs are
+    present in cinder.
diff --git a/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
new file mode 100644
index 0000000..fd7a874
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Wallaby development cycle to
+    mark the end of support for EM Stein release in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Victoria
+    * Ussuri
+    * Train
+
+    Current development of Tempest is for OpenStack Wallaby development
+    cycle.
diff --git a/releasenotes/notes/image-client-add-versions-and-tasks-ac289dbfe1c899cc.yaml b/releasenotes/notes/image-client-add-versions-and-tasks-ac289dbfe1c899cc.yaml
new file mode 100644
index 0000000..fde6193
--- /dev/null
+++ b/releasenotes/notes/image-client-add-versions-and-tasks-ac289dbfe1c899cc.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Adds a method to images_client to get the tasks relevant to a given image. Also
+    adds a has_version() method to the image versions_client to probe for the
+    availability of a given API version.
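+
+    A hedged sketch; only has_version() is named above, while the version
+    value and the image-tasks method name are illustrative assumptions::
+
+        if cls.versions_client.has_version('2.12'):
+            # hypothetical name for the new images_client method
+            tasks = cls.images_client.show_image_tasks(image['id'])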
diff --git a/releasenotes/notes/intermediate-wallaby-release-55a0b31b1dee7b23.yaml b/releasenotes/notes/intermediate-wallaby-release-55a0b31b1dee7b23.yaml
new file mode 100644
index 0000000..cda3b89
--- /dev/null
+++ b/releasenotes/notes/intermediate-wallaby-release-55a0b31b1dee7b23.yaml
@@ -0,0 +1,4 @@
+---
+prelude: >
+    This is an intermediate release during the Wallaby development cycle to
+    make new functionality available to plugins and other consumers.
diff --git a/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml b/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml
new file mode 100644
index 0000000..2779b26
--- /dev/null
+++ b/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Added the public interface log_console_output().
+    It used to be a private method named _log_console_output().
+    Since this interface is meant to be used by tempest plugins,
+    it does not need to remain a private API.
+
diff --git a/releasenotes/notes/make-create-user-domain-aware-for-v3-creds-client-5054f58e715adc0c.yaml b/releasenotes/notes/make-create-user-domain-aware-for-v3-creds-client-5054f58e715adc0c.yaml
new file mode 100644
index 0000000..8931f09
--- /dev/null
+++ b/releasenotes/notes/make-create-user-domain-aware-for-v3-creds-client-5054f58e715adc0c.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+  - |
+    [`bug 1613819 <https://bugs.launchpad.net/tempest/+bug/1613819>`_]
+    admin_domain_name and default_credentials_domain_name parameters
+    under [auth] now affect a domain used for creating test users just
+    as they affect it for projects. Previously a domain with an id set
+    to "default" had to be present in order for test user creation to
+    succeed with Keystone v3.
diff --git a/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml b/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml
new file mode 100644
index 0000000..ff406fb
--- /dev/null
+++ b/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml
@@ -0,0 +1,6 @@
+---
+prelude: >
+    The integrated horizon dashboard test has been moved
+    from the tempest-horizon plugin into Tempest. You no longer need
+    to install tempest-horizon to run the horizon test, which
+    can be run using Tempest itself.
diff --git a/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml b/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml
new file mode 100644
index 0000000..1f2d6b9
--- /dev/null
+++ b/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    New config option to ``network-feature-enabled``: ``available_features``.
+    This is a list which can contain features that are not discoverable
+    through Neutron API, or it can be the special entry ``all``.
diff --git a/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml b/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml
new file mode 100644
index 0000000..9e6d49a
--- /dev/null
+++ b/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    Add ``placement`` API methods for testing the Routed Provider Networks
+    feature. The following API calls are now available to Tempest in the new
+    resource_providers_client (a usage sketch follows the list):
+
+    * GET /resource_providers
+    * GET /resource_providers/{uuid}
+    * GET /resource_providers/{uuid}/inventories
+    * GET /resource_providers/{uuid}/aggregates
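+
+    A hedged usage sketch; the client attribute and method names are
+    assumptions mirroring the URLs above::
+
+        client = self.os_admin.resource_providers_client
+        providers = client.list_resource_providers()['resource_providers']
+        rp_uuid = providers[0]['uuid']
+        inventories = client.list_resource_provider_inventories(rp_uuid)
+        aggregates = client.list_resource_provider_aggregates(rp_uuid)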
diff --git a/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml b/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml
new file mode 100644
index 0000000..42322e4
--- /dev/null
+++ b/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    The ``tempest.lib.common.utils.data_utils.random_bytes()`` helper
+    function will no longer allow a ``size`` of more than 1MiB. Tests
+    generally do not need to generate and use large payloads for
+    feature verification and it is easy to lose track of and duplicate
+    large buffers. The sum total of such errors can become problematic
+    in parallelized and constrained CI environments.
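+
+    For example, assuming the usual helper import::
+
+        from tempest.lib.common.utils import data_utils
+
+        payload = data_utils.random_bytes(64 * 1024)  # well below the 1MiB cap
+        # data_utils.random_bytes(2 * 1024 * 1024)    # no longer allowed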
diff --git a/releasenotes/notes/remove-deprecated-old-token-clients-e4c2e654132f1130.yaml b/releasenotes/notes/remove-deprecated-old-token-clients-e4c2e654132f1130.yaml
new file mode 100644
index 0000000..9acb873
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-old-token-clients-e4c2e654132f1130.yaml
@@ -0,0 +1,9 @@
+---
+prelude: >
+    Tempest's identity service clients TokenClientJSON and V3TokenClientJSON
+    have been removed.
+upgrade:
+  - |
+    Tempest's identity service clients TokenClientJSON and V3TokenClientJSON
+    had long been deprecated and are now removed. Please use the new service
+    clients TokenClient and V3TokenClient instead.
diff --git a/releasenotes/notes/remove-deprecated-volume-config-options-4b7ea93b88e5b982.yaml b/releasenotes/notes/remove-deprecated-volume-config-options-4b7ea93b88e5b982.yaml
new file mode 100644
index 0000000..f3002f9
--- /dev/null
+++ b/releasenotes/notes/remove-deprecated-volume-config-options-4b7ea93b88e5b982.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    Deprecated config options to select the Volume API version have been
+    removed. Use ``CONF.volume.catalog_type`` to run volume tests under v3
+    or v2 APIs.
+
+    * ``CONF.volume-feature-enabled.api_v2``
+    * ``CONF.volume-feature-enabled.api_v3``
\ No newline at end of file
diff --git a/releasenotes/notes/remove-old-data-utils-e0966f882f7fe23a.yaml b/releasenotes/notes/remove-old-data-utils-e0966f882f7fe23a.yaml
new file mode 100644
index 0000000..ac20340
--- /dev/null
+++ b/releasenotes/notes/remove-old-data-utils-e0966f882f7fe23a.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    The old deprecated ``data-utils`` from ``tempest.common.utils`` has been
+    removed. If you are still using this, use the stable version
+    of ``data-utils`` from its new location, ``tempest.lib.common.utils``.
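+
+    For example, the stable import now looks like::
+
+        from tempest.lib.common.utils import data_utils
+
+        name = data_utils.rand_name('example')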
diff --git a/releasenotes/notes/remove-volume-v1-service-clients-9235e3a965f93c09.yaml b/releasenotes/notes/remove-volume-v1-service-clients-9235e3a965f93c09.yaml
new file mode 100644
index 0000000..3c90f81
--- /dev/null
+++ b/releasenotes/notes/remove-volume-v1-service-clients-9235e3a965f93c09.yaml
@@ -0,0 +1,8 @@
+---
+prelude: >
+    Tempest Service clients for volume v1 APIs are removed.
+upgrade:
+  - |
+    Cinder removed the volume v1 APIs in the Queens release and Tempest
+    now supports only stable Train onwards, so all the Tempest
+    service clients for the volume v1 APIs have been removed.
diff --git a/releasenotes/notes/support-for-rbac-new-scope-6ec8164ce1e7288c.yaml b/releasenotes/notes/support-for-rbac-new-scope-6ec8164ce1e7288c.yaml
new file mode 100644
index 0000000..af7df93
--- /dev/null
+++ b/releasenotes/notes/support-for-rbac-new-scope-6ec8164ce1e7288c.yaml
@@ -0,0 +1,13 @@
+---
+prelude: >
+    Support for RBAC new system scope is added in Tempest.
+features:
+  - |
+    Keystone provides a new scoped token called ``system`` which
+    can be used to query system scoped API operations. Projects
+    are moving towards policies with the new scope types; Keystone and Nova
+    already provide the new policies for RBAC checks. Tempest has added
+    support for querying the system scoped token from keystone to test
+    the new policies.
+    As a next step, we will be moving all the Tempest tests to the projects'
+    new policies.
diff --git a/releasenotes/notes/support-scope-in-get-roles-dynamic-creds-90bfab163c1c289a.yaml b/releasenotes/notes/support-scope-in-get-roles-dynamic-creds-90bfab163c1c289a.yaml
new file mode 100644
index 0000000..26282f0
--- /dev/null
+++ b/releasenotes/notes/support-scope-in-get-roles-dynamic-creds-90bfab163c1c289a.yaml
@@ -0,0 +1,36 @@
+---
+features:
+  - |
+    Dynamic credentials now support the scope type for specific roles
+    too, along with the ``admin``, ``member`` and ``reader`` roles.
+    Tests can specify the scope in the prefix of the ``cls.credentials`` name.
+    If ``system`` is the prefix in the ``cls.credentials`` name, then the
+    credentials will be created with ``system`` scope. If ``domain`` is the
+    prefix, the credentials will be created with ``domain`` scope, otherwise
+    the default ``project`` scope will be used.
+    For example::
+
+        credentials = [['my_role', 'role1'], # this will be old style and project scoped
+                       ['project_my_role', 'role1'], # this will be project scoped
+                       ['domain_my_role', 'role1'], # this will be domain scoped
+                       ['system_my_role', 'role1']] # this will be system scoped
+
+    And below is how a test can access the credential manager of the
+    respective credential type::
+
+        cls.os_my_role.any_client
+        cls.os_project_my_role.any_client
+        cls.os_domain_my_role.any_client
+        cls.os_system_my_role.any_client
+
+
+    For backward compatibility, we also set the credential manager class
+    attribute in the old style form, prefixed with ``os_roles_*`` (for example
+    ``cls.os_roles_my_role``), but we recommend using the new style attribute
+    as shown above.
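+
+    A minimal, hedged sketch of a test class using the new prefixes; the
+    role name, base class and the client accessed are placeholders::
+
+        from tempest.api.compute import base
+
+        class TestScopedExample(base.BaseV2ComputeTest):
+
+            credentials = [['system_my_role', 'role1'],
+                           ['project_my_role', 'role1']]
+
+            @classmethod
+            def setup_clients(cls):
+                super(TestScopedExample, cls).setup_clients()
+                # system scoped and project scoped managers respectively
+                cls.system_client = cls.os_system_my_role.any_client
+                cls.project_client = cls.os_project_my_role.any_client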
+issues:
+  - |
+    Scope support for specific roles is not yet added for pre-provisioned credentials.
+fixes:
+  - |
+    Fixes the `bug# 1917168 <https://bugs.launchpad.net/tempest/+bug/1917168>`_
diff --git a/releasenotes/notes/system-scope-44244cc955a7825f.yaml b/releasenotes/notes/system-scope-44244cc955a7825f.yaml
new file mode 100644
index 0000000..969a71f
--- /dev/null
+++ b/releasenotes/notes/system-scope-44244cc955a7825f.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Adds new personas that can be used to test service policies for all
+    default scopes (project, domain, and system) and roles (reader, member,
+    and admin). Both dynamic credentials and pre-provisioned credentials are
+    supported.
diff --git a/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml b/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
new file mode 100644
index 0000000..574f6d9
--- /dev/null
+++ b/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
@@ -0,0 +1,17 @@
+---
+prelude: |
+    This release is to tag Tempest for the OpenStack Victoria release.
+    This release marks the start of Victoria release support in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Victoria
+    * Ussuri
+    * Train
+    * Stein
+
+    Current development of Tempest is for OpenStack Wallaby development
+    cycle. Every Tempest commit is also tested against master during
+    the Wallaby cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Victoria (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Victoria release.
diff --git a/releasenotes/notes/tempest-wallaby-release-0f2cea5ccf63485e.yaml b/releasenotes/notes/tempest-wallaby-release-0f2cea5ccf63485e.yaml
new file mode 100644
index 0000000..25fb1f2
--- /dev/null
+++ b/releasenotes/notes/tempest-wallaby-release-0f2cea5ccf63485e.yaml
@@ -0,0 +1,17 @@
+---
+prelude: |
+    This release is to tag Tempest for OpenStack Wallaby release.
+    This release marks the start of Wallaby release support in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Wallaby
+    * Victoria
+    * Ussuri
+    * Train
+
+    Current development of Tempest is for OpenStack Xena development
+    cycle. Every Tempest commit is also tested against master during
+    the Xena cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a Xena (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack Wallaby release.
diff --git a/releasenotes/notes/xenapi_apis-conf-fcca549283e53ed6.yaml b/releasenotes/notes/xenapi_apis-conf-fcca549283e53ed6.yaml
new file mode 100644
index 0000000..4d26210
--- /dev/null
+++ b/releasenotes/notes/xenapi_apis-conf-fcca549283e53ed6.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+  - |
+    A number of Compute APIs that only worked with the XenAPI virt driver have
+    been removed in the Compute service. As a result, their corresponding tests
+    are now disabled by default. They can be re-enabled using the new
+    ``[compute_feature_enabled] xenapi_apis`` config option.
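+
+    Tests gate on this option via a skip check, as the compute agents tests
+    updated in this change do; a representative sketch::
+
+        @classmethod
+        def skip_checks(cls):
+            super(AgentsAdminTestJSON, cls).skip_checks()
+            if not CONF.compute_feature_enabled.xenapi_apis:
+                raise cls.skipException('The os-agents API is not supported.')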
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 9a9de43..b353a18 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -59,7 +59,7 @@
 master_doc = 'index'
 
 # General information about the project.
-copyright = u'2016, tempest Developers'
+copyright = '2016, tempest Developers'
 
 # Release do not need a version number in the title, they
 # cover multiple versions.
@@ -194,8 +194,8 @@
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
     ('index', 'olso.configReleaseNotes.tex',
-     u'olso.config Release Notes Documentation',
-     u'tempest Developers', 'manual'),
+     'olso.config Release Notes Documentation',
+     'tempest Developers', 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
@@ -225,8 +225,8 @@
 # (source start file, name, description, authors, manual section).
 man_pages = [
     ('index', 'olso.configreleasenotes',
-     u'tempest Release Notes Documentation',
-     [u'tempest Developers'], 1)
+     'tempest Release Notes Documentation',
+     ['tempest Developers'], 1)
 ]
 
 # If true, show URL addresses after external links.
@@ -240,8 +240,8 @@
 #  dir menu entry, description, category)
 texinfo_documents = [
     ('index', 'tempestReleaseNotes',
-     u'tempest Release Notes Documentation',
-     u'tempest Developers', 'olso.configReleaseNotes',
+     'tempest Release Notes Documentation',
+     'tempest Developers', 'olso.configReleaseNotes',
      'An OpenStack library for parsing configuration options from the command'
      ' line and configuration files.',
      'Miscellaneous'),
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index d8702f9..ed0a09f 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,9 @@
    :maxdepth: 1
 
    unreleased
+   v27.0.0
+   v26.1.0
+   v26.0.0
    v24.0.0
    v23.0.0
    v22.1.0
diff --git a/releasenotes/source/v26.0.0.rst b/releasenotes/source/v26.0.0.rst
new file mode 100644
index 0000000..4161f89
--- /dev/null
+++ b/releasenotes/source/v26.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v26.0.0 Release Notes
+=====================
+.. release-notes:: 26.0.0 Release Notes
+   :version: 26.0.0
diff --git a/releasenotes/source/v26.1.0.rst b/releasenotes/source/v26.1.0.rst
new file mode 100644
index 0000000..554cf1f
--- /dev/null
+++ b/releasenotes/source/v26.1.0.rst
@@ -0,0 +1,5 @@
+=====================
+v26.1.0 Release Notes
+=====================
+.. release-notes:: 26.1.0 Release Notes
+   :version: 26.1.0
diff --git a/releasenotes/source/v27.0.0.rst b/releasenotes/source/v27.0.0.rst
new file mode 100644
index 0000000..0009124
--- /dev/null
+++ b/releasenotes/source/v27.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v27.0.0 Release Notes
+=====================
+.. release-notes:: 27.0.0 Release Notes
+   :version: 27.0.0
diff --git a/requirements.txt b/requirements.txt
index d8738f0..c71cabe 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,20 +5,19 @@
 cliff!=2.9.0,>=2.8.0 # Apache-2.0
 jsonschema>=3.2.0 # MIT
 testtools>=2.2.0 # MIT
-paramiko>=2.0.0 # LGPLv2.1+
+paramiko>=2.7.0 # LGPLv2.1+
 netaddr>=0.7.18 # BSD
 oslo.concurrency>=3.26.0 # Apache-2.0
 oslo.config>=5.2.0 # Apache-2.0
 oslo.log>=3.36.0 # Apache-2.0
 stestr>=1.0.0 # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-oslo.utils>=3.33.0 # Apache-2.0
-six>=1.10.0 # MIT
+oslo.utils>=4.7.0 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD
 PyYAML>=3.12 # MIT
 python-subunit>=1.0.0 # Apache-2.0/BSD
 stevedore>=1.20.0 # Apache-2.0
-PrettyTable<0.8,>=0.7.1 # BSD
+PrettyTable>=0.7.1 # BSD
 urllib3>=1.21.1 # MIT
 debtcollector>=1.2.0 # Apache-2.0
 unittest2>=1.1.0 # BSD
diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst
deleted file mode 100644
index a8447d2..0000000
--- a/roles/process-stackviz/README.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-Generate stackviz report.
-
-Generate stackviz report using subunit and dstat data, using
-the stackviz archive embedded in test images.
-
-**Role Variables**
-
-.. zuul:rolevar:: devstack_base_dir
-   :default: /opt/stack
-
-   The devstack base directory.
-
-.. zuul:rolevar:: stage_dir
-   :default: "{{ ansible_user_dir }}"
-
-   The stage directory where the input data can be found and
-   the output will be produced.
-
-.. zuul:rolevar:: zuul_work_dir
-   :default: {{ devstack_base_dir }}/tempest
-
-   Directory to work in. It has to be a fully qualified path.
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml
deleted file mode 100644
index f3bc32b..0000000
--- a/roles/process-stackviz/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-devstack_base_dir: /opt/stack
-stage_dir: "{{ ansible_user_dir }}"
-zuul_work_dir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
deleted file mode 100644
index e3a0a0e..0000000
--- a/roles/process-stackviz/tasks/main.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-- name: Check if stackviz archive exists
-  stat:
-    path: "/opt/cache/files/stackviz-latest.tar.gz"
-  register: stackviz_archive
-
-- debug:
-    msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
-  when: not stackviz_archive.stat.exists
-
-- name: Check if subunit data exists
-  stat:
-    path: "{{ zuul_work_dir }}/testrepository.subunit"
-  register: subunit_input
-
-- debug:
-    msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit"
-  when: not subunit_input.stat.exists
-
-- name: Install stackviz
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-  block:
-    - include_role:
-        name: ensure-pip
-
-    - pip:
-        name: "file://{{ stackviz_archive.stat.path }}"
-        virtualenv: /tmp/stackviz
-        virtualenv_command: '{{ ensure_pip_virtualenv_command }}'
-        extra_args: -U
-
-- name: Deploy stackviz static html+js
-  command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-
-- name: Check if dstat data exists
-  stat:
-    path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
-  register: dstat_input
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-
-- name: Run stackviz with dstat
-  shell: |
-    cat {{ subunit_input.stat.path }} | \
-      /tmp/stackviz/bin/stackviz-export \
-        --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
-        --env --stdin \
-        {{ stage_dir }}/stackviz/data
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-    - dstat_input.stat.exists
-  failed_when: False
-
-- name: Run stackviz without dstat
-  shell: |
-    cat {{ subunit_input.stat.path }} | \
-      /tmp/stackviz/bin/stackviz-export \
-        --env --stdin \
-        {{ stage_dir }}/stackviz/data
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-    - not dstat_input.stat.exists
-  failed_when: False
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 3643edb..0c72b69 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -32,7 +32,11 @@
 
 .. zuul:rolevar:: tempest_test_blacklist
 
-   Specifies a blacklist file to skip tests that are not needed.
+   DEPRECATED option, please use tempest_test_exclude_list instead.
+
+.. zuul:rolevar:: tempest_test_exclude_list
+
+   Specifies an exclude list file to skip tests that are not needed.
 
    Pass a full path to the file.
 
@@ -44,6 +48,11 @@
 .. zuul:rolevar:: tempest_black_regex
    :default: ''
 
+   DEPRECATED option, please use tempest_exclude_regex instead.
+
+.. zuul:rolevar:: tempest_exclude_regex
+   :default: ''
+
    A regular expression used to skip the tests.
 
    It works only when used with some specific tox environments
@@ -51,7 +60,7 @@
 
        ::
            vars:
-             tempest_black_regex: (tempest.api.identity).*$
+             tempest_exclude_regex: (tempest.api.identity).*$
 
 .. zuul:rolevar:: tox_extra_args
    :default: ''
@@ -72,7 +81,7 @@
 .. zuul:rolevar:: stable_constraints_file
    :default: ''
 
-   Upper constraints file to be used for stable branch till stable/rocky.
+   Upper constraints file to be used for stable branch till stable/stein.
 
 .. zuul:rolevar:: tempest_tox_environment
    :default: ''
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index 5867b6c..5f13883 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -1,9 +1,11 @@
 devstack_base_dir: /opt/stack
 tempest_test_regex: ''
 tox_envlist: smoke
-tempest_black_regex: ''
 tox_extra_args: ''
 tempest_test_timeout: ''
 stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
 target_branch: "{{ zuul.branch }}"
 tempest_tox_environment: {}
+# NOTE(gmann): external_bridge_mtu shows as undefined for run-tempest role
+# defining default value here to avoid that error.
+external_bridge_mtu: 0
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 1de3725..a8b3ede 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,17 +25,31 @@
     target_branch: "{{ zuul.override_checkout }}"
   when: zuul.override_checkout is defined
 
-- name: Use stable branch upper-constraints till stable/rocky
+- name: Use stable branch upper-constraints till stable/stein
   set_fact:
     # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
-  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky"]
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+- name: Use Configured upper-constraints for non-master Tempest
+  set_fact:
+    # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
+    tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) | combine({'TOX_CONSTRAINTS_FILE': devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS']}) }}"
+  when:
+    - devstack_localrc is defined
+    - "'TEMPEST_BRANCH' in devstack_localrc"
+    - "'TEMPEST_VENV_UPPER_CONSTRAINTS' in devstack_localrc"
+    - devstack_localrc['TEMPEST_BRANCH'] != 'master'
+    - devstack_localrc['TEMPEST_VENV_UPPER_CONSTRAINTS'] != 'master'
 
 - name: Set OS_TEST_TIMEOUT if requested
   set_fact:
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
   when: tempest_test_timeout != ''
 
+# TODO(kopecmartin) remove the following 'when block' after all consumers of
+# the role have switched to tempest_test_exclude_list option, until then it's
+# kept here for backward compatibility
 - when:
     - tempest_test_blacklist is defined
   block:
@@ -50,10 +64,42 @@
         blacklist_option: "--blacklist-file={{ tempest_test_blacklist|quote }}"
       when: blacklist_stat.stat.exists
 
+- when:
+    - tempest_test_exclude_list is defined
+  block:
+    - name: Check for test exclude list file
+      stat:
+        path: "{{ tempest_test_exclude_list }}"
+      register:
+        exclude_list_stat
+
+    - name: Build exclude list option
+      set_fact:
+        exclude_list_option: "--exclude-list={{ tempest_test_exclude_list|quote }}"
+      when: exclude_list_stat.stat.exists
+
+# TODO(kopecmartin) remove this after all consumers of the role have switched
+# to tempest_exclude_regex option, until then it's kept here for the backward
+# compatibility
+- name: Build exclude regex (old param)
+  set_fact:
+    tempest_test_exclude_regex: "--black-regex={{tempest_black_regex|quote}}"
+  when:
+    - tempest_black_regex is defined
+    - tempest_exclude_regex is not defined
+
+- name: Build exclude regex (new param)
+  set_fact:
+    tempest_test_exclude_regex: "--exclude-regex={{tempest_exclude_regex|quote}}"
+  when:
+    - tempest_black_regex is not defined
+    - tempest_exclude_regex is defined
+
 - name: Run Tempest
-  command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+  command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} \
+           {{blacklist_option|default('')}}  {{exclude_list_option|default('')}} \
             --concurrency={{tempest_concurrency|default(default_concurrency)}} \
-            --black-regex={{tempest_black_regex|quote}}
+           {{tempest_test_exclude_regex|default('')}}
   args:
     chdir: "{{devstack_base_dir}}/tempest"
   register: tempest_run_result
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
index 70719ca..d1fad90 100644
--- a/roles/tempest-cleanup/README.rst
+++ b/roles/tempest-cleanup/README.rst
@@ -31,3 +31,31 @@
    When true, tempest cleanup creates a report (./dry_run.json) of the
    resources that would be cleaned up if the role was ran with dry_run option
    set to false.
+
+.. zuul:rolevar:: run_tempest_fail_if_leaked_resources
+   :default: false
+
+   When true, the role will fail if any leaked resources are detected.
+   The detection is done via the dry_run.json file: if it contains any
+   resources, some must have been leaked. This can also be used to verify
+   that tempest cleanup was successful.
+
+
+Role usage
+----------
+
+The role can also be used to verify that tempest tests don't leak any
+resources, or to test that the 'tempest cleanup' command deleted all leaked
+resources as expected.
+Either way, the role needs to be run first with the init_saved_state variable
+set to true before any tempest tests are executed.
+Then, after the tempest tests have run, this role needs to be run again with
+the role variables set according to the desired outcome:
+
+1. to verify that tempest tests don't leak any resources,
+   run_tempest_dry_cleanup and run_tempest_fail_if_leaked_resources have to
+   be set to true.
+
+2. to check that the 'tempest cleanup' command deleted all the leaked
+   resources, run_tempest_cleanup and run_tempest_fail_if_leaked_resources
+   have to be set to true.
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
index fc1948a..ce78bdb 100644
--- a/roles/tempest-cleanup/defaults/main.yaml
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -1,3 +1,4 @@
 devstack_base_dir: /opt/stack
 init_saved_state: false
 dry_run: false
+run_tempest_fail_if_leaked_resources: false
diff --git a/roles/tempest-cleanup/tasks/dry_run.yaml b/roles/tempest-cleanup/tasks/dry_run.yaml
new file mode 100644
index 0000000..46749ab
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/dry_run.yaml
@@ -0,0 +1,7 @@
+---
+- name: Run tempest cleanup dry-run
+  become: yes
+  become_user: tempest
+  command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
+  args:
+    chdir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/tempest-cleanup/tasks/dry_run_checker.py b/roles/tempest-cleanup/tasks/dry_run_checker.py
new file mode 100644
index 0000000..9cd9a85
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/dry_run_checker.py
@@ -0,0 +1,71 @@
+# Copyright 2020 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility for content checking of a given dry_run.json file.
+"""
+
+import argparse
+import json
+import sys
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(__doc__)
+    parser.add_argument('--is-empty', action="store_true", dest='is_empty',
+                        default=False,
+                        help="""Are values of a given dry_run.json empty?""")
+    parser.add_argument('--file', dest='file', default=None, metavar='PATH',
+                        help="A path to a dry_run.json file.")
+    return parser
+
+
+def parse_arguments():
+    parser = get_parser()
+    args = parser.parse_args()
+    if not args.file:
+        sys.stderr.write('Path to a dry_run.json must be specified.\n')
+        sys.exit(1)
+    return args
+
+
+def load_json(path):
+    """Load json content from file addressed by path."""
+    try:
+        with open(path, 'rb') as json_file:
+            json_data = json.load(json_file)
+    except Exception as ex:
+        sys.exit(ex)
+    return json_data
+
+
+def are_values_empty(dry_run_content):
+    """Return true if values of dry_run.json are empty."""
+    for value in dry_run_content.values():
+        if value:
+            return False
+    return True
+
+
+def main():
+    args = parse_arguments()
+    content = load_json(args.file)
+    if args.is_empty:
+        if not are_values_empty(content):
+            sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
index 5444afc..c1d63f0 100644
--- a/roles/tempest-cleanup/tasks/main.yaml
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -12,20 +12,35 @@
 
 - when: dry_run
   block:
-    - name: Run tempest cleanup dry-run
-      become: yes
-      become_user: tempest
-      command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
-      args:
-        chdir: "{{ devstack_base_dir }}/tempest"
+    - import_tasks: dry_run.yaml
 
     - name: Cat dry_run.json
       command: cat "{{ devstack_base_dir }}/tempest/dry_run.json"
 
-- name: Run tempest cleanup
-  become: yes
-  become_user: tempest
-  command: tox -evenv-tempest -- tempest cleanup --debug
-  args:
-    chdir: "{{ devstack_base_dir }}/tempest"
-  when: not dry_run and not init_saved_state
+- when:
+    - not dry_run
+    - not init_saved_state
+  block:
+    - name: Run tempest cleanup
+      become: yes
+      become_user: tempest
+      command: tox -evenv-tempest -- tempest cleanup --debug
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
+
+- when:
+    - run_tempest_fail_if_leaked_resources
+    - not init_saved_state
+  block:
+    # let's run the dry run again, if we haven't already, to check that no
+    # leftover resources were left behind after the cleanup in the previous task
+    - import_tasks: dry_run.yaml
+      when: not dry_run
+
+    - name: Fail if any resources are leaked
+      become: yes
+      become_user: tempest
+      shell: |
+        python3 roles/tempest-cleanup/tasks/dry_run_checker.py --file {{ devstack_base_dir }}/tempest/dry_run.json --is-empty
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
diff --git a/setup.cfg b/setup.cfg
index 18427a2..d885db0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,12 +1,12 @@
 [metadata]
 name = tempest
 summary = OpenStack Integration Testing
-description-file =
+description_file =
     README.rst
 author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = https://docs.openstack.org/tempest/latest/
-python-requires = >=3.6
+author_email = openstack-discuss@lists.openstack.org
+home_page = https://docs.openstack.org/tempest/latest/
+python_requires = >=3.6
 classifier =
     Intended Audience :: Information Technology
     Intended Audience :: System Administrators
@@ -48,4 +48,3 @@
     run = tempest.cmd.run:TempestRun
 oslo.config.opts =
     tempest.config = tempest.config:list_opts
-
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index 0901374..4cc5fdd 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -13,12 +13,22 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
+CONF = config.CONF
 
+
+# TODO(stephenfin): Remove these tests once the nova Ussuri branch goes EOL
 class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests Agents API"""
+    """Tests Compute Agents API"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(AgentsAdminTestJSON, cls).skip_checks()
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise cls.skipException('The os-agents API is not supported.')
 
     @classmethod
     def setup_clients(cls):
@@ -46,7 +56,7 @@
 
     @decorators.idempotent_id('1fc6bdc8-0b6d-4cc7-9f30-9b04fabe5b90')
     def test_create_agent(self):
-        # Create an agent.
+        """Test creating a compute agent"""
         params = self._param_helper(
             hypervisor='kvm', os='win', architecture='x86',
             version='7.0', url='xxx://xxxx/xxx/xxx',
@@ -58,6 +68,7 @@
 
     @decorators.idempotent_id('dc9ffd51-1c50-4f0e-a820-ae6d2a568a9e')
     def test_update_agent(self):
+        """Test updating a compute agent"""
         # Create and update an agent.
         body = self.client.create_agent(**self.params_agent)['agent']
         self.addCleanup(self.client.delete_agent, body['agent_id'])
@@ -71,7 +82,7 @@
 
     @decorators.idempotent_id('470e0b89-386f-407b-91fd-819737d0b335')
     def test_delete_agent(self):
-        # Create an agent and delete it.
+        """Test deleting a compute agent"""
         body = self.client.create_agent(**self.params_agent)['agent']
         self.client.delete_agent(body['agent_id'])
 
@@ -82,7 +93,7 @@
 
     @decorators.idempotent_id('6a326c69-654b-438a-80a3-34bcc454e138')
     def test_list_agents(self):
-        # Create an agent and  list all agents.
+        """Test listing compute agents"""
         body = self.client.create_agent(**self.params_agent)['agent']
         self.addCleanup(self.client.delete_agent, body['agent_id'])
         agents = self.client.list_agents()['agents']
@@ -91,7 +102,7 @@
 
     @decorators.idempotent_id('eabadde4-3cd7-4ec4-a4b5-5a936d2d4408')
     def test_list_agents_with_filter(self):
-        # Create agents and list the agent builds by the filter.
+        """Test listing compute agents by the filter"""
         body = self.client.create_agent(**self.params_agent)['agent']
         self.addCleanup(self.client.delete_agent, body['agent_id'])
         params = self._param_helper(
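Most of the hunks above only add docstrings; every test in this class follows the same create/verify/cleanup cycle against the os-agents client. A condensed, hedged sketch of that cycle (the md5hash is a placeholder value, and agents_client stands for os_admin.agents_client):

# Sketch of the agent lifecycle the tests above exercise; 'testcase' is
# the running test instance, 'agents_client' the admin os-agents client.
def exercise_agent_lifecycle(testcase, agents_client):
    agent = agents_client.create_agent(
        hypervisor='kvm', os='win', architecture='x86', version='7.0',
        url='xxx://xxxx/xxx/xxx', md5hash='0' * 32)['agent']
    testcase.addCleanup(agents_client.delete_agent, agent['agent_id'])

    listed = agents_client.list_agents()['agents']
    testcase.assertIn(agent['agent_id'],
                      [a['agent_id'] for a in listed])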
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index c4a8bf5..ccdfbf3 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -42,6 +42,8 @@
     @decorators.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
     @testtools.skipUnless(CONF.validation.run_validation,
                           'Instance validation tests are disabled.')
+    @testtools.skipIf("aarch64" in CONF.scenario.img_file,
+                      "Aarch64 does not support ephemeral disk test")
     def test_verify_created_server_ephemeral_disk(self):
         """Verify that the ephemeral disk is created when creating server"""
         flavor_base = self.flavors_client.show_flavor(
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index 45ca10a..ac09cb0 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -46,7 +46,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0621c53e-d45d-40e7-951d-43e5e257b272')
     def test_flavor_access_list_with_public_flavor(self):
-        # Test to list flavor access with exceptions by querying public flavor
+        """Test listing flavor access of a public flavor should fail"""
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='True')
         self.assertRaises(lib_exc.NotFound,
@@ -56,7 +56,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('41eaaade-6d37-4f28-9c74-f21b46ca67bd')
     def test_flavor_non_admin_add(self):
-        # Test to add flavor access as a user without admin privileges.
+        """Test adding flavor access by a non-admin user is forbidden"""
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
         self.assertRaises(lib_exc.Forbidden,
@@ -67,7 +67,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('073e79a6-c311-4525-82dc-6083d919cb3a')
     def test_flavor_non_admin_remove(self):
-        # Test to remove flavor access as a user without admin privileges.
+        """Test removing flavor access by a non-admin user should fail"""
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
 
@@ -84,6 +84,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f3592cc0-0306-483c-b210-9a7b5346eddc')
     def test_add_flavor_access_duplicate(self):
+        """Test adding duplicate flavor access to same flavor should fail"""
         # Create a new flavor.
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
@@ -104,6 +105,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('1f710927-3bc7-4381-9f82-0ca6e42644b7')
     def test_remove_flavor_access_not_found(self):
+        """Test removing non existent flavor access should fail"""
         # Create a new flavor.
         flavor = self.create_flavor(ram=self.ram, vcpus=self.vcpus,
                                     disk=self.disk, is_public='False')
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log.py b/tempest/api/compute/admin/test_instance_usage_audit_log.py
index 1b62249..c4c0542 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log.py
@@ -14,14 +14,14 @@
 #    under the License.
 
 import datetime
-
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
 from tempest.api.compute import base
 from tempest.lib import decorators
 
 
 class InstanceUsageAuditLogTestJSON(base.BaseV2ComputeAdminTest):
+    """Test instance usage audit logs API"""
 
     @classmethod
     def setup_clients(cls):
@@ -30,12 +30,12 @@
 
     @decorators.idempotent_id('25319919-33d9-424f-9f99-2c203ee48b9d')
     def test_list_instance_usage_audit_logs(self):
-        # list instance usage audit logs
+        """Test listing instance usage audit logs"""
         self.adm_client.list_instance_usage_audit_logs()
 
     @decorators.idempotent_id('6e40459d-7c5f-400b-9e83-449fbc8e7feb')
     def test_get_instance_usage_audit_log(self):
-        # Get instance usage audit log before specified time
+        """Test getting instance usage audit log before specified time"""
         now = datetime.datetime.now()
         self.adm_client.show_instance_usage_audit_log(
             urllib.quote(now.strftime("%Y-%m-%d %H:%M:%S")))
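The six.moves shim is dropped in favour of the standard library import; the quoting of the timestamp passed to show_instance_usage_audit_log is unchanged. A small standalone illustration of that handling:

# Python 3 only: 'from urllib import parse as urllib' keeps the existing
# urllib.quote(...) call sites working without six.moves.
import datetime
from urllib import parse as urllib

now = datetime.datetime.now()
before_timestamp = urllib.quote(now.strftime("%Y-%m-%d %H:%M:%S"))
# The quoted value (e.g. '2021-01-01%2012%3A00%3A00') is safe to embed
# in the request path of the audit log API.
print(before_timestamp)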
diff --git a/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py b/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
index de8e221..c115451 100644
--- a/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
+++ b/tempest/api/compute/admin/test_instance_usage_audit_log_negative.py
@@ -14,8 +14,7 @@
 #    under the License.
 
 import datetime
-
-from six.moves.urllib import parse as urllib
+from urllib import parse as urllib
 
 from tempest.api.compute import base
 from tempest.lib import decorators
@@ -23,6 +22,7 @@
 
 
 class InstanceUsageAuditLogNegativeTestJSON(base.BaseV2ComputeAdminTest):
+    """Negative tests of instance usage audit logs"""
 
     @classmethod
     def setup_clients(cls):
@@ -32,7 +32,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a9d33178-d2c9-4131-ad3b-f4ca8d0308a2')
     def test_instance_usage_audit_logs_with_nonadmin_user(self):
-        # the instance_usage_audit_logs API just can be accessed by admin user
+        """Test list/show instance usage audit logs by non-admin should fail
+
+        The instance_usage_audit_logs API can only be accessed by admin users.
+        """
         self.assertRaises(lib_exc.Forbidden,
                           self.instance_usages_audit_log_client.
                           list_instance_usage_audit_logs)
@@ -45,6 +48,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9b952047-3641-41c7-ba91-a809fc5974c8')
     def test_get_instance_usage_audit_logs_with_invalid_time(self):
+        """Test showing instance usage audit logs with invalid time
+
+        Showing instance usage audit logs with invalid time should fail.
+        """
         self.assertRaises(lib_exc.BadRequest,
                           self.adm_client.show_instance_usage_audit_log,
                           "invalid_time")
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 941315e..c91b557 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -23,6 +23,8 @@
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
 CONF = config.CONF
@@ -55,6 +57,10 @@
     def setup_clients(cls):
         super(LiveMigrationTestBase, cls).setup_clients()
         cls.admin_migration_client = cls.os_admin.migrations_client
+        cls.networks_client = cls.os_primary.networks_client
+        cls.subnets_client = cls.os_primary.subnets_client
+        cls.ports_client = cls.os_primary.ports_client
+        cls.trunks_client = cls.os_primary.trunks_client
 
     def _migrate_server_to(self, server_id, dest_host, volume_backed=False):
         kwargs = dict()
@@ -169,8 +175,13 @@
                       block_migrate_cinder_iscsi,
                       'Block Live migration not configured for iSCSI')
     @utils.services('volume')
-    def test_iscsi_volume(self):
-        """Test live migrating a server with volume attached"""
+    def test_live_block_migration_with_attached_volume(self):
+        """Test the live-migration of an instance with an attached volume.
+
+        This tests the live-migration of an instance with both a local disk
+        and an attached volume, unlike test_volume_backed_live_migration
+        above, which tests live-migration with only an attached volume.
+        """
         server = self.create_test_server(wait_until="ACTIVE")
         server_id = server['id']
         if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
@@ -192,6 +203,90 @@
 
         self.assertEqual(volume_id1, volume_id2)
 
+    def _create_net_subnet(self, name, cidr):
+        net_name = data_utils.rand_name(name=name)
+        net = self.networks_client.create_network(name=net_name)['network']
+        self.addClassResourceCleanup(
+            self.networks_client.delete_network, net['id'])
+
+        subnet = self.subnets_client.create_subnet(
+            network_id=net['id'],
+            cidr=cidr,
+            ip_version=4)
+        self.addClassResourceCleanup(self.subnets_client.delete_subnet,
+                                     subnet['subnet']['id'])
+        return net
+
+    def _create_port(self, network_id, name):
+        name = data_utils.rand_name(name=name)
+        port = self.ports_client.create_port(name=name,
+                                             network_id=network_id)['port']
+        self.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+                                     self.ports_client.delete_port,
+                                     port_id=port['id'])
+        return port
+
+    def _create_trunk_with_subport(self):
+        tenant_network = self.get_tenant_network()
+        parent = self._create_port(network_id=tenant_network['id'],
+                                   name='parent')
+        net = self._create_net_subnet(name='subport_net', cidr='19.80.0.0/24')
+        subport = self._create_port(network_id=net['id'], name='subport')
+
+        trunk = self.trunks_client.create_trunk(
+            name=data_utils.rand_name('trunk'),
+            port_id=parent['id'],
+            sub_ports=[{"segmentation_id": 42, "port_id": subport['id'],
+                        "segmentation_type": "vlan"}]
+        )['trunk']
+        self.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
+                                     self.trunks_client.delete_trunk,
+                                     trunk['id'])
+        return trunk, parent, subport
+
+    def _is_port_status_active(self, port_id):
+        port = self.ports_client.show_port(port_id)['port']
+        return port['status'] == 'ACTIVE'
+
+    @decorators.idempotent_id('0022c12e-a482-42b0-be2d-396b5f0cffe3')
+    @utils.requires_ext(service='network', extension='trunk')
+    @utils.services('network')
+    def test_live_migration_with_trunk(self):
+        """Test live migration with trunk and subport"""
+        trunk, parent, subport = self._create_trunk_with_subport()
+
+        server = self.create_test_server(
+            wait_until="ACTIVE", networks=[{'port': parent['id']}])
+
+        # Wait till subport status is ACTIVE
+        self.assertTrue(
+            test_utils.call_until_true(
+                self._is_port_status_active, CONF.validation.connect_timeout,
+                5, subport['id']))
+        self.assertTrue(
+            test_utils.call_until_true(
+                self._is_port_status_active, CONF.validation.connect_timeout,
+                5, parent['id']))
+        subport = self.ports_client.show_port(subport['id'])['port']
+
+        if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
+            # not to specify a host so that the scheduler will pick one
+            target_host = None
+        else:
+            target_host = self.get_host_other_than(server['id'])
+
+        self._live_migrate(server['id'], target_host, 'ACTIVE')
+
+        # Wait till subport status is ACTIVE
+        self.assertTrue(
+            test_utils.call_until_true(
+                self._is_port_status_active, CONF.validation.connect_timeout,
+                5, subport['id']))
+        self.assertTrue(
+            test_utils.call_until_true(
+                self._is_port_status_active, CONF.validation.connect_timeout,
+                5, parent['id']))
+
 
 class LiveMigrationRemoteConsolesV26Test(LiveMigrationTestBase):
     min_microversion = '2.6'
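The new trunk test relies on test_utils.call_until_true to poll the parent port and subport until they go ACTIVE before and after the migration. A minimal sketch of that polling pattern as a standalone helper (the helper name is illustrative):

# call_until_true(func, duration, sleep_for, *args) re-invokes func every
# sleep_for seconds until it returns True or duration seconds elapse; it
# returns True as soon as func does, or False once the duration runs out.
from tempest.lib.common.utils import test_utils


def wait_for_port_active(ports_client, port_id, timeout, interval=5):
    def port_is_active(pid):
        port = ports_client.show_port(pid)['port']
        return port['status'] == 'ACTIVE'

    if not test_utils.call_until_true(port_is_active, timeout, interval,
                                      port_id):
        raise RuntimeError('Port %s never reached ACTIVE' % port_id)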
diff --git a/tempest/api/compute/admin/test_live_migration_negative.py b/tempest/api/compute/admin/test_live_migration_negative.py
index 8327a3b..80c0525 100644
--- a/tempest/api/compute/admin/test_live_migration_negative.py
+++ b/tempest/api/compute/admin/test_live_migration_negative.py
@@ -24,6 +24,8 @@
 
 
 class LiveMigrationNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of live migration"""
+
     @classmethod
     def skip_checks(cls):
         super(LiveMigrationNegativeTest, cls).skip_checks()
@@ -40,7 +42,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7fb7856e-ae92-44c9-861a-af62d7830bcb')
     def test_invalid_host_for_migration(self):
-        # Migrating to an invalid host should not change the status
+        """Test migrating to an invalid host should not change the status"""
         target_host = data_utils.rand_name('host')
         server = self.create_test_server(wait_until="ACTIVE")
 
@@ -52,6 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6e2f94f5-2ee8-4830-bef5-5bc95bb0795b')
     def test_live_block_migration_suspended(self):
+        """Test migrating a suspended server should not change the status"""
         server = self.create_test_server(wait_until="ACTIVE")
 
         self.admin_servers_client.suspend_server(server['id'])
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 37f5aec..89152d6 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -94,6 +94,16 @@
         # Now boot a server with the copied flavor.
         server = self.create_test_server(
             wait_until='ACTIVE', flavor=flavor['id'])
+        server = self.servers_client.show_server(server['id'])['server']
+
+        # If 'id' is not in server['flavor'], we can only compare the flavor
+        # details, so save the to-be-deleted flavor's details here for the
+        # flavor comparison after the server resize.
+        if not server['flavor'].get('id'):
+            pre_flavor = {}
+            body = self.flavors_client.show_flavor(flavor['id'])['flavor']
+            for key in ['name', 'ram', 'vcpus', 'disk']:
+                pre_flavor[key] = body[key]
 
         # Delete the flavor we used to boot the instance.
         self._flavor_clean_up(flavor['id'])
@@ -110,7 +120,18 @@
                                        'ACTIVE')
 
         server = self.servers_client.show_server(server['id'])['server']
-        self.assert_flavor_equal(flavor['id'], server['flavor'])
+        if server['flavor'].get('id'):
+            msg = 'server flavor is not the same as flavor!'
+            self.assertEqual(flavor['id'], server['flavor']['id'], msg)
+        else:
+            self.assertEqual(pre_flavor['name'],
+                             server['flavor']['original_name'],
+                             "original_name in server flavor is not same as "
+                             "flavor name!")
+            for key in ['ram', 'vcpus', 'disk']:
+                msg = ('attribute %s in server flavor is not the same '
+                       'as flavor!' % key)
+                self.assertEqual(pre_flavor[key], server['flavor'][key], msg)
 
     def _test_cold_migrate_server(self, revert=False):
         if CONF.compute.min_compute_nodes < 2:
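The branching above exists because, from compute microversion 2.47 on, show_server embeds the flavor details (including original_name) rather than a flavor id. A hedged sketch of the comparison as a standalone helper (names are illustrative, not part of this change):

# Compare a server's reported flavor against the flavor it was resized to,
# handling both response formats. 'expected' holds the values saved before
# the flavor was deleted: its id plus name, ram, vcpus and disk.
def assert_server_flavor(testcase, server, expected):
    server_flavor = server['flavor']
    if server_flavor.get('id'):
        # Pre-2.47 response: only the flavor id is embedded.
        testcase.assertEqual(expected['id'], server_flavor['id'])
    else:
        # 2.47+ response: the flavor details are embedded directly.
        testcase.assertEqual(expected['name'],
                             server_flavor['original_name'])
        for key in ('ram', 'vcpus', 'disk'):
            testcase.assertEqual(expected[key], server_flavor[key])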
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 0060ffe..9d5e0c9 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -97,9 +97,11 @@
 
 
 class QuotasAdminTestJSON(QuotasAdminTestBase):
+    """Test compute quotas by admin user"""
+
     @decorators.idempotent_id('3b0a7c8f-cf58-46b8-a60c-715a32a8ba7d')
     def test_get_default_quotas(self):
-        # Admin can get the default resource quota set for a tenant
+        """Test admin can get the default compute quota set for a project"""
         expected_quota_set = self.default_quota_set | set(['id'])
         quota_set = self.adm_client.show_default_quota_set(
             self.demo_tenant_id)['quota_set']
@@ -109,7 +111,7 @@
 
     @decorators.idempotent_id('55fbe2bf-21a9-435b-bbd2-4162b0ed799a')
     def test_update_all_quota_resources_for_tenant(self):
-        # Admin can update all the resource quota limits for a tenant
+        """Test admin can update all the compute quota limits for a project"""
         default_quota_set = self.adm_client.show_default_quota_set(
             self.demo_tenant_id)['quota_set']
         new_quota_set = {'metadata_items': 256, 'ram': 10240,
@@ -140,11 +142,12 @@
     # TODO(afazekas): merge these test cases
     @decorators.idempotent_id('ce9e0815-8091-4abd-8345-7fe5b85faa1d')
     def test_get_updated_quotas(self):
+        """Test that GET shows the updated quota set of project"""
         self._get_updated_quotas()
 
     @decorators.idempotent_id('389d04f0-3a41-405f-9317-e5f86e3c44f0')
     def test_delete_quota(self):
-        # Admin can delete the resource quota set for a project
+        """Test admin can delete the compute quota set for a project"""
         project_name = data_utils.rand_name('ram_quota_project')
         project_desc = project_name + '-desc'
         project = identity.identity_utils(self.os_admin).create_project(
@@ -165,26 +168,40 @@
 
 
 class QuotasAdminTestV236(QuotasAdminTestBase):
-    min_microversion = '2.36'
+    """Test compute quotas with microversion greater than 2.35
+
     # NOTE(gmann): This test tests the Quota APIs response schema
     # for 2.36 microversion. No specific assert or behaviour verification
     # is needed.
+    """
+
+    min_microversion = '2.36'
 
     @decorators.idempotent_id('4268b5c9-92e5-4adc-acf1-3a2798f3d803')
     def test_get_updated_quotas(self):
-        # Checking Quota update, get, get details APIs response schema
+        """Test compute quotas API with microversion greater than 2.35
+
+        Checking compute quota update, get, get details APIs response schema.
+        """
         self._get_updated_quotas()
 
 
 class QuotasAdminTestV257(QuotasAdminTestBase):
-    min_microversion = '2.57'
+    """Test compute quotas with microversion greater than 2.56
+
     # NOTE(gmann): This test tests the Quota APIs response schema
     # for 2.57 microversion. No specific assert or behaviour verification
     # is needed.
+    """
+
+    min_microversion = '2.57'
 
     @decorators.idempotent_id('e641e6c6-e86c-41a4-9e5c-9493c0ae47ad')
     def test_get_updated_quotas(self):
-        # Checking Quota update, get, get details APIs response schema
+        """Test compute quotas API with microversion greater than 2.56
+
+        Checking compute quota update, get, get details APIs response schema.
+        """
         self._get_updated_quotas()
 
 
@@ -212,6 +229,7 @@
     # 'danger' flag.
     @decorators.idempotent_id('7932ab0f-5136-4075-b201-c0e2338df51a')
     def test_update_default_quotas(self):
+        """Test updating default compute quota class set"""
         # get the current 'default' quota class values
         body = (self.adm_client.show_quota_class_set('default')
                 ['quota_class_set'])
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index dfa801b..f0a6a84 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -20,6 +20,12 @@
 
 
 class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
+    """Test security groups API that requires admin privilege
+
+    Test security groups API that requires admin privilege with compute
+    microversion less than 2.36
+    """
+
     max_microversion = '2.35'
 
     @classmethod
@@ -37,7 +43,17 @@
     @decorators.idempotent_id('49667619-5af9-4c63-ab5d-2cfdd1c8f7f1')
     @utils.services('network')
     def test_list_security_groups_list_all_tenants_filter(self):
-        # Admin can list security groups of all tenants
+        """Test listing security groups with all_tenants filter
+
+        1. Create two security groups for non-admin user
+        2. Create two security groups for admin user
+        3. Fetch all security groups based on 'all_tenants' search filter by
+           admin, check that all four created security groups are present in
+           fetched list
+        4. Fetch all security groups based on 'all_tenants' search filter by
+           non-admin, check only two security groups created by the provided
+           non-admin user are present in fetched list
+        """
         # List of all security groups created
         security_group_list = []
         # Create two security groups for a non-admin tenant
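The steps in the docstring above reduce to two list calls with the same filter, compared against the groups each credential created. A rough sketch of that check (client wiring and exact parameters are illustrative; the real test body is longer):

# Admin sees every project's security groups with the 'all_tenants'
# filter; a non-admin only sees its own.
def check_all_tenants_filter(admin_sg_client, user_sg_client,
                             admin_group_ids, user_group_ids):
    fetched = admin_sg_client.list_security_groups(
        all_tenants='true')['security_groups']
    fetched_ids = [sg['id'] for sg in fetched]
    for sg_id in admin_group_ids + user_group_ids:
        assert sg_id in fetched_ids, 'admin did not see group %s' % sg_id

    fetched = user_sg_client.list_security_groups(
        all_tenants='true')['security_groups']
    fetched_ids = [sg['id'] for sg in fetched]
    for sg_id in admin_group_ids:
        assert sg_id not in fetched_ids, (
            'non-admin saw admin group %s' % sg_id)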
diff --git a/tempest/api/compute/admin/test_server_diagnostics.py b/tempest/api/compute/admin/test_server_diagnostics.py
index 005efdd..d855a62 100644
--- a/tempest/api/compute/admin/test_server_diagnostics.py
+++ b/tempest/api/compute/admin/test_server_diagnostics.py
@@ -19,6 +19,8 @@
 
 
 class ServerDiagnosticsTest(base.BaseV2ComputeAdminTest):
+    """Test server diagnostics with compute microversion less than 2.48"""
+
     min_microversion = None
     max_microversion = '2.47'
 
@@ -29,6 +31,7 @@
 
     @decorators.idempotent_id('31ff3486-b8a0-4f56-a6c0-aab460531db3')
     def test_get_server_diagnostics(self):
+        """Test getting server diagnostics"""
         server_id = self.create_test_server(wait_until='ACTIVE')['id']
         diagnostics = self.client.show_server_diagnostics(server_id)
 
@@ -41,6 +44,8 @@
 
 
 class ServerDiagnosticsV248Test(base.BaseV2ComputeAdminTest):
+    """Test server diagnostics with compute microversion greater than 2.47"""
+
     min_microversion = '2.48'
     max_microversion = 'latest'
 
@@ -51,6 +56,7 @@
 
     @decorators.idempotent_id('64d0d48c-dff1-11e6-bf01-fe55135034f3')
     def test_get_server_diagnostics(self):
+        """Test getting server diagnostics"""
         server_id = self.create_test_server(wait_until='ACTIVE')['id']
         # Response status and filed types will be checked by json schema
         self.client.show_server_diagnostics(server_id)
diff --git a/tempest/api/compute/admin/test_server_diagnostics_negative.py b/tempest/api/compute/admin/test_server_diagnostics_negative.py
index 6215c37..8f14cbc 100644
--- a/tempest/api/compute/admin/test_server_diagnostics_negative.py
+++ b/tempest/api/compute/admin/test_server_diagnostics_negative.py
@@ -18,6 +18,7 @@
 
 
 class ServerDiagnosticsNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of server diagnostics"""
 
     @classmethod
     def setup_clients(cls):
@@ -27,7 +28,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e84e2234-60d2-42fa-8b30-e2d3049724ac')
     def test_get_server_diagnostics_by_non_admin(self):
-        # Non-admin user cannot view server diagnostics according to policy
+        """Test getting server diagnostics by non-admin user is forbidden
+
+        Non-admin user cannot view server diagnostics according to policy.
+        """
         server_id = self.create_test_server(wait_until='ACTIVE')['id']
         self.assertRaises(lib_exc.Forbidden,
                           self.client.show_server_diagnostics, server_id)
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 170b2cc..ab1b49a 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -14,10 +14,13 @@
 
 from tempest.api.compute import base
 from tempest.common import waiters
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
+CONF = config.CONF
+
 
 class ServersAdminTestJSON(base.BaseV2ComputeAdminTest):
     """Tests Servers API using admin privileges"""
@@ -45,7 +48,7 @@
 
     @decorators.idempotent_id('06f960bb-15bb-48dc-873d-f96e89be7870')
     def test_list_servers_filter_by_error_status(self):
-        # Filter the list of servers by server error status
+        """Test filtering the list of servers by server error status"""
         params = {'status': 'error'}
         self.client.reset_state(self.s1_id, state='error')
         body = self.non_admin_client.list_servers(**params)
@@ -61,6 +64,7 @@
 
     @decorators.idempotent_id('d56e9540-73ed-45e0-9b88-98fc419087eb')
     def test_list_servers_detailed_filter_by_invalid_status(self):
+        """Test filtering the list of servers by invalid server status"""
         params = {'status': 'invalid_status'}
         if self.is_requested_microversion_compatible('2.37'):
             body = self.client.list_servers(detail=True, **params)
@@ -72,8 +76,11 @@
 
     @decorators.idempotent_id('51717b38-bdc1-458b-b636-1cf82d99f62f')
     def test_list_servers_by_admin(self):
-        # Listing servers by admin user returns a list which doesn't
-        # contain the other tenants' server by default
+        """Test listing servers by admin without other projects
+
+        Listing servers by admin user returns a list which doesn't
+        contain the other projects' server by default.
+        """
         body = self.client.list_servers(detail=True)
         servers = body['servers']
 
@@ -85,8 +92,11 @@
 
     @decorators.idempotent_id('9f5579ae-19b4-4985-a091-2a5d56106580')
     def test_list_servers_by_admin_with_all_tenants(self):
-        # Listing servers by admin user with all tenants parameter
-        # Here should be listed all servers
+        """Test listing servers by admin with all tenants
+
+        Listing servers by admin user with all tenants parameter,
+        all servers should be listed.
+        """
         params = {'all_tenants': ''}
         body = self.client.list_servers(detail=True, **params)
         servers = body['servers']
@@ -98,8 +108,10 @@
     @decorators.related_bug('1659811')
     @decorators.idempotent_id('7e5d6b8f-454a-4ba1-8ae2-da857af8338b')
     def test_list_servers_by_admin_with_specified_tenant(self):
-        # In nova v2, tenant_id is ignored unless all_tenants is specified
+        """Test listing servers by admin with specified project
 
+        In nova v2, tenant_id is ignored unless all_tenants is specified.
+        """
         # List the primary tenant but get nothing due to odd specified behavior
         tenant_id = self.non_admin_client.tenant_id
         params = {'tenant_id': tenant_id}
@@ -128,7 +140,7 @@
 
     @decorators.idempotent_id('86c7a8f7-50cf-43a9-9bac-5b985317134f')
     def test_list_servers_filter_by_exist_host(self):
-        # Filter the list of servers by existent host
+        """Test filtering the list of servers by existent host"""
         server = self.client.show_server(self.s1_id)['server']
         hostname = server['OS-EXT-SRV-ATTR:host']
         params = {'host': hostname, 'all_tenants': '1'}
@@ -144,6 +156,7 @@
 
     @decorators.idempotent_id('ee8ae470-db70-474d-b752-690b7892cab1')
     def test_reset_state_server(self):
+        """Test resetting server state to error/active"""
         # Reset server's state to 'error'
         self.client.reset_state(self.s1_id, state='error')
 
@@ -160,9 +173,11 @@
 
     @decorators.idempotent_id('682cb127-e5bb-4f53-87ce-cb9003604442')
     def test_rebuild_server_in_error_state(self):
-        # The server in error state should be rebuilt using the provided
-        # image and changed to ACTIVE state
+        """Test rebuilding server in error state
 
+        The server in error state should be rebuilt using the provided
+        image and changed to ACTIVE state.
+        """
         # resetting vm state require admin privilege
         self.client.reset_state(self.s1_id, state='error')
         rebuilt_server = self.non_admin_client.rebuild_server(
@@ -188,6 +203,11 @@
 
     @decorators.idempotent_id('7a1323b4-a6a2-497a-96cb-76c07b945c71')
     def test_reset_network_inject_network_info(self):
+        """Test resetting and injecting network info of a server"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'The resetNetwork server action is not supported.')
+
         # Reset Network of a Server
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.reset_network(server['id'])
@@ -196,6 +216,7 @@
 
     @decorators.idempotent_id('fdcd9b33-0903-4e00-a1f7-b5f6543068d6')
     def test_create_server_with_scheduling_hint(self):
+        """Test creating server with scheduling hint"""
         # Create a server with scheduler hints.
         hints = {
             'same_host': self.s1_id
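The rebuild-in-error-state docstring above describes a short admin/non-admin hand-off. A condensed sketch of that flow with the clients and waiters already used in this file (the helper name is illustrative):

# An admin resets a server to ERROR, a regular user rebuilds it, and the
# caller waits for it to come back ACTIVE.
from tempest.common import waiters


def rebuild_error_server(admin_servers_client, servers_client,
                         server_id, image_ref):
    # Resetting the VM state requires admin privilege.
    admin_servers_client.reset_state(server_id, state='error')
    rebuilt = servers_client.rebuild_server(server_id, image_ref)['server']
    waiters.wait_for_server_status(servers_client, server_id, 'ACTIVE')
    return rebuilt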
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index f720b84..f52d4c0 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -26,7 +26,7 @@
 
 
 class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests Servers API using admin privileges"""
+    """Negative Tests of Servers API using admin privileges"""
 
     @classmethod
     def setup_clients(cls):
@@ -47,6 +47,7 @@
                           'Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_using_overlimit_ram(self):
+        """Test resizing server using over limit ram should fail"""
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
         self.useFixture(fixtures.LockFixture('compute_quotas'))
         quota_set = self.quotas_client.show_quota_set(
@@ -69,6 +70,7 @@
                           'Resize not available.')
     @decorators.attr(type=['negative'])
     def test_resize_server_using_overlimit_vcpus(self):
+        """Test resizing server using over limit vcpus should fail"""
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
         self.useFixture(fixtures.LockFixture('compute_quotas'))
         quota_set = self.quotas_client.show_quota_set(
@@ -89,6 +91,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('b0b4d8af-1256-41ef-9ee7-25f1c19dde80')
     def test_reset_state_server_invalid_state(self):
+        """Test resetting server state to invalid state value should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.client.reset_state, self.s1_id,
                           state='invalid')
@@ -96,6 +99,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4cdcc984-fab0-4577-9a9d-6d558527ee9d')
     def test_reset_state_server_invalid_type(self):
+        """Test resetting server state to invalid state type should fail"""
         self.assertRaises(lib_exc.BadRequest,
                           self.client.reset_state, self.s1_id,
                           state=1)
@@ -103,13 +107,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e741298b-8df2-46f0-81cb-8f814ff2504c')
     def test_reset_state_server_nonexistent_server(self):
+        """Test resetting a non existent server's state should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.reset_state, '999', state='error')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('46a4e1ca-87ae-4d28-987a-1b6b136a0221')
     def test_migrate_non_existent_server(self):
-        # migrate a non existent server
+        """Test migrating a non existent server should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.migrate_server,
                           data_utils.rand_uuid())
@@ -121,6 +126,7 @@
                           'Suspend is not available.')
     @decorators.attr(type=['negative'])
     def test_migrate_server_invalid_state(self):
+        """Test migrating a server with invalid state should fail"""
         # create server.
         server = self.create_test_server(wait_until='ACTIVE')
         server_id = server['id']
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
new file mode 100644
index 0000000..cf8c560
--- /dev/null
+++ b/tempest/api/compute/admin/test_volume.py
@@ -0,0 +1,116 @@
+# Copyright 2020 Red Hat Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import io
+
+from tempest.api.compute import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class BaseAttachSCSIVolumeTest(base.BaseV2ComputeAdminTest):
+    """Base class for the admin volume tests in this module."""
+    create_default_network = True
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseAttachSCSIVolumeTest, cls).skip_checks()
+        if not CONF.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(BaseAttachSCSIVolumeTest, cls).setup_credentials()
+
+    def _create_image_with_custom_property(self, **kwargs):
+        """Wrapper utility that returns the custom image.
+
+        Creates a new image by downloading the default image's bits and
+        uploading them to a new image. Any kwargs are set as image properties
+        on the new image.
+
+        :return: The UUID of the newly created image.
+        """
+        image = self.image_client.show_image(CONF.compute.image_ref)
+        image_data = self.image_client.show_image_file(
+            CONF.compute.image_ref).data
+        image_file = io.BytesIO(image_data)
+        create_dict = {
+            'container_format': image['container_format'],
+            'disk_format': image['disk_format'],
+            'min_disk': image['min_disk'],
+            'min_ram': image['min_ram'],
+            'visibility': 'public',
+        }
+        create_dict.update(kwargs)
+        new_image = self.image_client.create_image(**create_dict)
+        self.addCleanup(self.image_client.wait_for_resource_deletion,
+                        new_image['id'])
+        self.addCleanup(self.image_client.delete_image, new_image['id'])
+        self.image_client.store_image_file(new_image['id'], image_file)
+
+        return new_image['id']
+
+
+class AttachSCSIVolumeTestJSON(BaseAttachSCSIVolumeTest):
+    """Test attaching scsi volume to server"""
+
+    @decorators.idempotent_id('777e468f-17ca-4da4-b93d-b7dbf56c0494')
+    def test_attach_scsi_disk_with_config_drive(self):
+        """Test the attach/detach volume with config drive/scsi disk
+
+        Enable the config drive, followed by booting an instance
+        from an image with meta properties hw_cdrom: scsi and use
+        virtio-scsi mode with further asserting list volume attachments
+        in instance after attach and detach of the volume.
+        """
+        custom_img = self._create_image_with_custom_property(
+            hw_scsi_model='virtio-scsi',
+            hw_disk_bus='scsi',
+            hw_cdrom_bus='scsi')
+        server = self.create_test_server(image_id=custom_img,
+                                         config_drive=True,
+                                         wait_until='ACTIVE')
+
+        # NOTE(lyarwood): self.create_test_server deletes the server
+        # during class-level cleanup, so add a server cleanup here to
+        # ensure the instance is deleted before the created image. This
+        # avoids failures when the rbd backend is used for both Glance
+        # and Nova ephemeral storage. Also wait until the server is
+        # deleted, otherwise image deletion can start before the server
+        # is deleted.
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
+        self.addCleanup(self.servers_client.delete_server, server['id'])
+
+        volume = self.create_volume()
+        attachment = self.attach_volume(server, volume)
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, attachment['volumeId'], 'in-use')
+        volume_after_attach = self.servers_client.list_volume_attachments(
+            server['id'])['volumeAttachments']
+        self.assertEqual(1, len(volume_after_attach),
+                         "Failed to attach volume")
+        self.servers_client.detach_volume(
+            server['id'], attachment['volumeId'])
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, attachment['volumeId'], 'available')
+        waiters.wait_for_volume_attachment_remove_from_server(
+            self.servers_client, server['id'], attachment['volumeId'])
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index edcb1a7..c1236a7 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -68,21 +68,7 @@
 
 
 class TestVolumeSwap(TestVolumeSwapBase):
-    """The test suite for swapping of volume with admin user.
-
-    The following is the scenario outline:
-
-    1. Create a volume "volume1" with non-admin.
-    2. Create a volume "volume2" with non-admin.
-    3. Boot an instance "instance1" with non-admin.
-    4. Attach "volume1" to "instance1" with non-admin.
-    5. Swap volume from "volume1" to "volume2" as admin.
-    6. Check the swap volume is successful and "volume2"
-       is attached to "instance1" and "volume1" is in available state.
-    7. Swap volume from "volume2" to "volume1" as admin.
-    8. Check the swap volume is successful and "volume1"
-       is attached to "instance1" and "volume2" is in available state.
-    """
+    """The test suite for swapping of volume with admin user"""
 
     # NOTE(mriedem): This is an uncommon scenario to call the compute API
     # to swap volumes directly; swap volume is primarily only for volume
@@ -92,6 +78,21 @@
     @decorators.idempotent_id('1769f00d-a693-4d67-a631-6a3496773813')
     @utils.services('volume')
     def test_volume_swap(self):
+        """Test swapping of volume attached to server with admin user
+
+        The following is the scenario outline:
+
+        1. Create a volume "volume1" with non-admin.
+        2. Create a volume "volume2" with non-admin.
+        3. Boot an instance "instance1" with non-admin.
+        4. Attach "volume1" to "instance1" with non-admin.
+        5. Swap volume from "volume1" to "volume2" as admin.
+        6. Check the swap volume is successful and "volume2"
+           is attached to "instance1" and "volume1" is in available state.
+        7. Swap volume from "volume2" to "volume1" as admin.
+        8. Check the swap volume is successful and "volume1"
+           is attached to "instance1" and "volume2" is in available state.
+        """
         # Create two volumes.
         # NOTE(gmann): Volumes are created before server creation so that
         # volumes cleanup can happen successfully irrespective of which volume
@@ -134,6 +135,12 @@
 
 
 class TestMultiAttachVolumeSwap(TestVolumeSwapBase):
+    """Test swapping volume attached to multiple servers
+
+    Test swapping volume attached to multiple servers with microversion
+    greater than 2.59
+    """
+
     min_microversion = '2.60'
     max_microversion = 'latest'
 
@@ -164,6 +171,20 @@
                              condition=CONF.compute.min_compute_nodes > 1)
     @utils.services('volume')
     def test_volume_swap_with_multiattach(self):
+        """Test swapping volume attached to multiple servers
+
+        The following is the scenario outline:
+
+        1. Create a volume "volume1" with non-admin.
+        2. Create a volume "volume2" with non-admin.
+        3. Boot 2 instances "server1" and "server2" with non-admin.
+        4. Attach "volume1" to "server1" with non-admin.
+        5. Attach "volume1" to "server2" with non-admin.
+        6. Swap "volume1" to "volume2" on "server1"
+        7. Check "volume1" is attached to "server2" and not attached to
+           "server1"
+        8. Check "volume2" is attached to "server1".
+        """
         # Create two volumes.
         # NOTE(gmann): Volumes are created before server creation so that
         # volumes cleanup can happen successfully irrespective of which volume
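As a companion to the scenario outline in TestVolumeSwap above, the swap itself is a single admin call on the server followed by waiting for both volumes to settle. A hedged sketch (the helper name is illustrative):

# Swap volume1 for volume2 on a server as admin, then wait for volume1 to
# return to 'available' and volume2 to become 'in-use'.
from tempest.common import waiters


def swap_and_wait(admin_servers_client, volumes_client,
                  server_id, volume1_id, volume2_id):
    admin_servers_client.update_attached_volume(
        server_id, volume1_id, volumeId=volume2_id)
    waiters.wait_for_volume_resource_status(
        volumes_client, volume1_id, 'available')
    waiters.wait_for_volume_resource_status(
        volumes_client, volume2_id, 'in-use')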
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 7b0f48b..10d522b 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -23,6 +23,8 @@
 
 
 class VolumesAdminNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of volume swapping"""
+
     create_default_network = True
 
     @classmethod
@@ -40,6 +42,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
     def test_update_attached_volume_with_nonexistent_volume_in_uri(self):
+        """Test swapping non existent volume should fail"""
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
@@ -51,6 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
     def test_update_attached_volume_with_nonexistent_volume_in_body(self):
+        """Test swapping volume to a non existence volume should fail"""
         volume = self.create_volume()
         self.attach_volume(self.server, volume)
 
@@ -62,6 +66,12 @@
 
 
 class UpdateMultiattachVolumeNegativeTest(base.BaseV2ComputeAdminTest):
+    """Negative tests of swapping volume attached to multiple servers
+
+    Negative tests of swapping volume attached to multiple servers with
+    compute microversion greater than 2.59 and volume microversion greater
+    than 3.26
+    """
 
     min_microversion = '2.60'
     volume_min_microversion = '3.27'
@@ -76,7 +86,16 @@
     @decorators.idempotent_id('7576d497-b7c6-44bd-9cc5-c5b4e50fec71')
     @utils.services('volume')
     def test_multiattach_rw_volume_update_failure(self):
+        """Test swapping volume attached to multi-servers with read-write mode
 
+        1. Create two volumes "vol1" and "vol2"
+        2. Create two instances "server1" and "server2"
+        3. Attach "vol1" to both of these instances
+        4. By default both of these attachments should have an attach_mode of
+           read-write, so trying to swap "vol1" to "vol2" should fail
+        5. Check "vol1" is still attached to both servers
+        6. Check "vol2" is not attached to any server
+        """
         # Create two multiattach capable volumes.
         vol1 = self.create_volume(multiattach=True)
         vol2 = self.create_volume(multiattach=True)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 74570ce..7900b77 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -171,8 +171,11 @@
         cls.flavor_ref = CONF.compute.flavor_ref
         cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
         cls.ssh_user = CONF.validation.image_ssh_user
+        cls.ssh_alt_user = CONF.validation.image_alt_ssh_user
         cls.image_ssh_user = CONF.validation.image_ssh_user
+        cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user
         cls.image_ssh_password = CONF.validation.image_ssh_password
+        cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password
 
     @classmethod
     def is_requested_microversion_compatible(cls, max_version):
@@ -566,17 +569,19 @@
         # the state of the volume to change to available. This is so we don't
         # error out when trying to delete the volume during teardown.
         if volume['multiattach']:
+            att = waiters.wait_for_volume_attachment_create(
+                self.volumes_client, volume['id'], server['id'])
             self.addCleanup(waiters.wait_for_volume_attachment_remove,
                             self.volumes_client, volume['id'],
-                            attachment['id'])
+                            att['attachment_id'])
         else:
             self.addCleanup(waiters.wait_for_volume_resource_status,
                             self.volumes_client, volume['id'], 'available')
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume['id'], 'in-use')
         # Ignore 404s on detach in case the server is deleted or the volume
         # is already detached.
         self.addCleanup(self._detach_volume, server, volume)
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'in-use')
         return attachment
 
     def create_volume_snapshot(self, volume_id, name=None, description=None,
@@ -632,6 +637,9 @@
             cls.os_admin.availability_zone_client)
         cls.admin_flavors_client = cls.os_admin.flavors_client
         cls.admin_servers_client = cls.os_admin.servers_client
+        cls.image_client = cls.os_admin.image_client_v2
+        cls.admin_assisted_volume_snapshots_client = \
+            cls.os_admin.assisted_volume_snapshots_client
 
     def create_flavor(self, ram, vcpus, disk, name=None,
                       is_public='True', **kwargs):
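The reordering in attach_volume above leans on the fact that cleanups registered with addCleanup run in LIFO order, so the detach added last runs before the waits registered earlier. A tiny self-contained illustration of that ordering:

# Illustration only: addCleanup callbacks run last-in, first-out.
import unittest


class CleanupOrder(unittest.TestCase):
    def test_lifo(self):
        order = []
        self.addCleanup(order.append, 'registered first, runs last')
        self.addCleanup(order.append, 'registered last, runs first')
        self.doCleanups()
        self.assertEqual(['registered last, runs first',
                          'registered first, runs last'], order)


if __name__ == '__main__':
    unittest.main()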
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 58861a1..9ab75c5 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -28,6 +28,9 @@
         flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
         flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
                              'name': flavor['name']}
+        # description field is added to the response of list_flavors in 2.55
+        if not self.is_requested_microversion_compatible('2.54'):
+            flavor_min_detail.update({'description': flavor['description']})
         self.assertIn(flavor_min_detail, flavors)
 
     @decorators.idempotent_id('6e85fde4-b3cd-4137-ab72-ed5f418e8c24')
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index 235049a..5d6a7d7 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -13,10 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import io
 import random
 
-import six
-
 from tempest.api.compute import base
 from tempest.common import image as common_image
 from tempest.common import utils
@@ -44,7 +43,7 @@
             CONF.compute.flavor_ref)['flavor']
         min_img_ram = flavor['ram'] + 1
         size = random.randint(1024, 4096)
-        image_file = six.BytesIO(data_utils.random_bytes(size))
+        image_file = io.BytesIO(data_utils.random_bytes(size))
         params = {
             'name': data_utils.rand_name('image'),
             'container_format': CONF.image.container_formats[0],
diff --git a/tempest/api/compute/images/test_image_metadata.py b/tempest/api/compute/images/test_image_metadata.py
index 1f3af5f..ece983d 100644
--- a/tempest/api/compute/images/test_image_metadata.py
+++ b/tempest/api/compute/images/test_image_metadata.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.api.compute import base
 from tempest.common import image as common_image
@@ -28,6 +28,8 @@
 
 
 class ImagesMetadataTestJSON(base.BaseV2ComputeTest):
+    """Test image metadata with compute microversion less than 2.39"""
+
     max_microversion = '2.38'
 
     @classmethod
@@ -75,7 +77,7 @@
         cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
                                     cls.glance_client.delete_image,
                                     cls.image_id)
-        image_file = six.BytesIO((b'*' * 1024))
+        image_file = io.BytesIO((b'*' * 1024))
         if CONF.image_feature_enabled.api_v1:
             cls.glance_client.update_image(cls.image_id, data=image_file)
         else:
@@ -89,7 +91,10 @@
 
     @decorators.idempotent_id('37ec6edd-cf30-4c53-bd45-ae74db6b0531')
     def test_list_image_metadata(self):
-        # All metadata key/value pairs for an image should be returned
+        """Test listing image metadata
+
+        All metadata key/value pairs for an image should be returned.
+        """
         resp_metadata = self.client.list_image_metadata(self.image_id)
         expected = {'metadata': {
             'os_version': 'value1', 'os_distro': 'value2'}}
@@ -97,7 +102,10 @@
 
     @decorators.idempotent_id('ece7befc-d3ce-42a4-b4be-c3067a418c29')
     def test_set_image_metadata(self):
-        # The metadata for the image should match the new values
+        """Test setting image metadata
+
+        The metadata for the image should match the new values.
+        """
         req_metadata = {'os_version': 'value2', 'architecture': 'value3'}
         self.client.set_image_metadata(self.image_id,
                                        req_metadata)
@@ -108,7 +116,10 @@
 
     @decorators.idempotent_id('7b491c11-a9d5-40fe-a696-7f7e03d3fea2')
     def test_update_image_metadata(self):
-        # The metadata for the image should match the updated values
+        """Test updating image medata
+
+        The metadata for the image should match the updated values.
+        """
         req_metadata = {'os_version': 'alt1', 'architecture': 'value3'}
         self.client.update_image_metadata(self.image_id,
                                           req_metadata)
@@ -122,15 +133,21 @@
 
     @decorators.idempotent_id('4f5db52f-6685-4c75-b848-f4bb363f9aa6')
     def test_get_image_metadata_item(self):
-        # The value for a specific metadata key should be returned
+        """Test getting image metadata item
+
+        The value for a specific metadata key should be returned.
+        """
         meta = self.client.show_image_metadata_item(self.image_id,
                                                     'os_distro')['meta']
         self.assertEqual('value2', meta['os_distro'])
 
     @decorators.idempotent_id('f2de776a-4778-4d90-a5da-aae63aee64ae')
     def test_set_image_metadata_item(self):
-        # The value provided for the given meta item should be set for
-        # the image
+        """Test setting image metadata item
+
+        The value provided for the given meta item should be set for
+        the image.
+        """
         meta = {'os_version': 'alt'}
         self.client.set_image_metadata_item(self.image_id,
                                             'os_version', meta)
@@ -140,7 +157,10 @@
 
     @decorators.idempotent_id('a013796c-ba37-4bb5-8602-d944511def14')
     def test_delete_image_metadata_item(self):
-        # The metadata value/key pair should be deleted from the image
+        """Test deleting image metadata item
+
+        The metadata value/key pair should be deleted from the image.
+        """
         self.client.delete_image_metadata_item(self.image_id,
                                                'os_version')
         resp_metadata = self.client.list_image_metadata(self.image_id)
diff --git a/tempest/api/compute/images/test_image_metadata_negative.py b/tempest/api/compute/images/test_image_metadata_negative.py
index 407fb08..b9806c7 100644
--- a/tempest/api/compute/images/test_image_metadata_negative.py
+++ b/tempest/api/compute/images/test_image_metadata_negative.py
@@ -20,6 +20,11 @@
 
 
 class ImagesMetadataNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of image metadata
+
+    Negative tests of image metadata with compute microversion less than 2.39.
+    """
+
     max_microversion = '2.38'
 
     @classmethod
@@ -30,15 +35,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('94069db2-792f-4fa8-8bd3-2271a6e0c095')
     def test_list_nonexistent_image_metadata(self):
-        # Negative test: List on nonexistent image
-        # metadata should not happen
+        """Test listing metadata of a non existence image should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.list_image_metadata,
                           data_utils.rand_uuid())
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a403ef9e-9f95-427c-b70a-3ce3388796f1')
     def test_update_nonexistent_image_metadata(self):
-        # Negative test:An update should not happen for a non-existent image
+        """Test updating metadata of a non existence image should fail"""
         meta = {'os_distro': 'alt1', 'os_version': 'alt2'}
         self.assertRaises(lib_exc.NotFound,
                           self.client.update_image_metadata,
@@ -47,7 +51,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('41ae052c-6ee6-405c-985e-5712393a620d')
     def test_get_nonexistent_image_metadata_item(self):
-        # Negative test: Get on non-existent image should not happen
+        """Test getting metadata of a non existence image should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.show_image_metadata_item,
                           data_utils.rand_uuid(), 'os_version')
@@ -55,7 +59,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('dc64f2ce-77e8-45b0-88c8-e15041d08eaf')
     def test_set_nonexistent_image_metadata(self):
-        # Negative test: Metadata should not be set to a non-existent image
+        """Test setting metadata of a non existence image should fail"""
         meta = {'os_distro': 'alt1', 'os_version': 'alt2'}
         self.assertRaises(lib_exc.NotFound, self.client.set_image_metadata,
                           data_utils.rand_uuid(), meta)
@@ -63,8 +67,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('2154fd03-ab54-457c-8874-e6e3eb56e9cf')
     def test_set_nonexistent_image_metadata_item(self):
-        # Negative test: Metadata item should not be set to a
-        # nonexistent image
+        """Test setting metadata item of a non existence image should fail"""
         meta = {'os_distro': 'alt'}
         self.assertRaises(lib_exc.NotFound,
                           self.client.set_image_metadata_item,
@@ -74,8 +77,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('848e157f-6bcf-4b2e-a5dd-5124025a8518')
     def test_delete_nonexistent_image_metadata_item(self):
-        # Negative test: Shouldn't be able to delete metadata
-        # item from non-existent image
+        """Test deleting metadata item of a non existence image should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_image_metadata_item,
                           data_utils.rand_uuid(), 'os_distro')
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index b811421..23f8326 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -22,6 +22,7 @@
 
 
 class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
+    """Test server images API"""
 
     @classmethod
     def resource_setup(cls):
@@ -54,6 +55,7 @@
 
     @decorators.idempotent_id('3731d080-d4c5-4872-b41a-64d0d0021314')
     def test_create_delete_image(self):
+        """Test create/delete server image"""
         if self.is_requested_microversion_compatible('2.35'):
             MIN_DISK = 'minDisk'
             MIN_RAM = 'minRam'
@@ -93,6 +95,7 @@
 
     @decorators.idempotent_id('3b7c6fe4-dfe7-477c-9243-b06359db51e6')
     def test_create_image_specify_multibyte_character_image_name(self):
+        """Test creating server image with multibyte character image name"""
         # prefix character is:
         # http://unicode.org/cldr/utility/character.jsp?a=20A1
 
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 37f9be3..275a26f 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -30,6 +30,8 @@
 
 
 class ImagesOneServerNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of server images"""
+
     create_default_network = True
 
     def tearDown(self):
@@ -87,7 +89,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('55d1d38c-dd66-4933-9c8e-7d92aeb60ddc')
     def test_create_image_specify_invalid_metadata(self):
-        # Return an error when creating image with invalid metadata
+        """Test creating server image with invalid metadata should fail"""
         meta = {'': ''}
         self.assertRaises(lib_exc.BadRequest, self.create_image_from_server,
                           self.server_id, metadata=meta)
@@ -95,7 +97,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('3d24d11f-5366-4536-bd28-cff32b748eca')
     def test_create_image_specify_metadata_over_limits(self):
-        # Return an error when creating image with meta data over 255 chars
+        """Test creating server image with metadata over 255 should fail"""
         meta = {'a' * 256: 'b' * 256}
         self.assertRaises(lib_exc.BadRequest, self.create_image_from_server,
                           self.server_id, metadata=meta)
@@ -103,28 +105,40 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0460efcf-ee88-4f94-acef-1bf658695456')
     def test_create_second_image_when_first_image_is_being_saved(self):
-        # Disallow creating another image when first image is being saved
+        """Test creating another server image when first image is being saved
 
-        # Create first snapshot
-        image = self.create_image_from_server(self.server_id)
-        self.addCleanup(self._reset_server)
+        Creating another server image when first image is being saved is
+        not allowed.
+        """
+        try:
+            # Create first snapshot
+            image = self.create_image_from_server(self.server_id)
+            self.addCleanup(self._reset_server)
 
-        # Create second snapshot
-        self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
-                          self.server_id)
+            # Create second snapshot
+            self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
+                              self.server_id)
 
-        if api_version_utils.compare_version_header_to_response(
-            "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
-            image_id = image['image_id']
-        else:
-            image_id = data_utils.parse_image_id(image.response['location'])
-        self.client.delete_image(image_id)
+            if api_version_utils.compare_version_header_to_response(
+                "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+                image_id = image['image_id']
+            else:
+                image_id = data_utils.parse_image_id(
+                    image.response['location'])
+            self.client.delete_image(image_id)
+
+        except lib_exc.TimeoutException as ex:
+            # The test could not capture the image in the SAVING state:
+            # once the timeout is reached the image will no longer be in
+            # the 'SAVING' state, and this test case has no other state
+            # transition in scope.
+            # Hence, skip the test.
+            raise self.skipException("This test is skipped because " + str(ex))
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('084f0cbc-500a-4963-8a4e-312905862581')
     def test_create_image_specify_name_over_character_limit(self):
-        # Return an error if snapshot name over 255 characters is passed
-
+        """Test creating server image with image name over 255 should fail"""
         snapshot_name = ('a' * 256)
         self.assertRaises(lib_exc.BadRequest,
                           self.compute_images_client.create_image,
@@ -133,8 +147,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0894954d-2db2-4195-a45b-ffec0bc0187e')
     def test_delete_image_that_is_not_yet_active(self):
-        # Return an error while trying to delete an image what is creating
-
+        """Test deleting a non-active server image should fail"""
         image = self.create_image_from_server(self.server_id)
         if api_version_utils.compare_version_header_to_response(
             "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
diff --git a/tempest/api/compute/images/test_list_image_filters.py b/tempest/api/compute/images/test_list_image_filters.py
index 2ac7de3..15b8a00 100644
--- a/tempest/api/compute/images/test_list_image_filters.py
+++ b/tempest/api/compute/images/test_list_image_filters.py
@@ -15,7 +15,7 @@
 
 import time
 
-import six
+import io
 import testtools
 
 from tempest.api.compute import base
@@ -31,6 +31,8 @@
 
 
 class ListImageFiltersTestJSON(base.BaseV2ComputeTest):
+    """Test listing server images with compute microversion less than 2.36"""
+
     max_microversion = '2.35'
 
     @classmethod
@@ -83,7 +85,7 @@
             # Wait 1 second between creation and upload to ensure a delta
             # between created_at and updated_at.
             time.sleep(1)
-            image_file = six.BytesIO((b'*' * 1024))
+            image_file = io.BytesIO((b'*' * 1024))
             if CONF.image_feature_enabled.api_v1:
                 cls.glance_client.update_image(image_id, data=image_file)
             else:
@@ -129,8 +131,11 @@
 
     @decorators.idempotent_id('a3f5b513-aeb3-42a9-b18e-f091ef73254d')
     def test_list_images_filter_by_status(self):
-        # The list of images should contain only images with the
-        # provided status
+        """Test listing server images filtered by image status
+
+        The list of images should contain only images with the
+        provided image status.
+        """
         params = {'status': 'ACTIVE'}
         images = self.client.list_images(**params)['images']
 
@@ -140,8 +145,11 @@
 
     @decorators.idempotent_id('33163b73-79f5-4d07-a7ea-9213bcc468ff')
     def test_list_images_filter_by_name(self):
-        # List of all images should contain the expected images filtered
-        # by name
+        """Test listing server images filtered by image name
+
+        The list of images should contain only images with the
+        provided image name.
+        """
         params = {'name': self.image1['name']}
         images = self.client.list_images(**params)['images']
 
@@ -153,7 +161,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_server_id(self):
-        # The images should contain images filtered by server id
+        """Test listing images filtered by server id
+
+        The list of images should contain only images with the
+        provided server id.
+        """
         params = {'server': self.server1['id']}
         images = self.client.list_images(**params)['images']
 
@@ -169,7 +181,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_server_ref(self):
-        # The list of servers should be filtered by server ref
+        """Test listing images filtered by server link href
+
+        The list of images should contain only images with the
+        provided server link href.
+        """
         server_links = self.server2['links']
 
         # Try all server link types
@@ -188,7 +204,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_filter_by_type(self):
-        # The list of servers should be filtered by image type
+        """Test listing images filtered by image type
+
+        The list of images should contain only images with the
+        provided image type.
+        """
         params = {'type': 'snapshot'}
         images = self.client.list_images(**params)['images']
 
@@ -202,13 +222,22 @@
 
     @decorators.idempotent_id('3a484ca9-67ba-451e-b494-7fcf28d32d62')
     def test_list_images_limit_results(self):
-        # Verify only the expected number of results are returned
+        """Test listing images with limited count
+
+        If we use limit=1 when listing images, then only 1 image should be
+        returned.
+        """
         params = {'limit': '1'}
         images = self.client.list_images(**params)['images']
         self.assertEqual(1, len([x for x in images if 'id' in x]))
 
     @decorators.idempotent_id('18bac3ae-da27-436c-92a9-b22474d13aab')
     def test_list_images_filter_by_changes_since(self):
+        """Test listing images filtered by changes-since
+
+        The list of images should contain only images updated since the
+        provided changes-since value.
+        """
         # Verify only updated images are returned in the detailed list
 
         # Becoming ACTIVE will modify the updated time
@@ -220,8 +249,11 @@
 
     @decorators.idempotent_id('9b0ea018-6185-4f71-948a-a123a107988e')
     def test_list_images_with_detail_filter_by_status(self):
-        # Detailed list of all images should only contain images
-        # with the provided status
+        """Test listing server images details filtered by image status
+
+        The list of images should contain only images with the
+        provided image status.
+        """
         params = {'status': 'ACTIVE'}
         images = self.client.list_images(detail=True, **params)['images']
 
@@ -231,8 +263,11 @@
 
     @decorators.idempotent_id('644ea267-9bd9-4f3b-af9f-dffa02396a17')
     def test_list_images_with_detail_filter_by_name(self):
-        # Detailed list of all images should contain the expected
-        # images filtered by name
+        """Test listing server images details filtered by image name
+
+        The list of images should contain only images with the
+        provided image name.
+        """
         params = {'name': self.image1['name']}
         images = self.client.list_images(detail=True, **params)['images']
 
@@ -242,8 +277,11 @@
 
     @decorators.idempotent_id('ba2fa9a9-b672-47cc-b354-3b4c0600e2cb')
     def test_list_images_with_detail_limit_results(self):
-        # Verify only the expected number of results (with full details)
-        # are returned
+        """Test listing images details with limited count
+
+        If we use limit=1 when listing images with full details, then only 1
+        image should be returned.
+        """
         params = {'limit': '1'}
         images = self.client.list_images(detail=True, **params)['images']
         self.assertEqual(1, len(images))
@@ -252,7 +290,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_with_detail_filter_by_server_ref(self):
-        # Detailed list of servers should be filtered by server ref
+        """Test listing images details filtered by server link href
+
+        The list of images should contain only images with the
+        provided server link href.
+        """
         server_links = self.server2['links']
 
         # Try all server link types
@@ -271,7 +313,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
     def test_list_images_with_detail_filter_by_type(self):
-        # The detailed list of servers should be filtered by image type
+        """Test listing images details filtered by image type
+
+        The list of images should contain only images with the
+        provided image type.
+        """
         params = {'type': 'snapshot'}
         images = self.client.list_images(detail=True, **params)['images']
         self.client.show_image(self.image_ref)
@@ -286,8 +332,11 @@
 
     @decorators.idempotent_id('7d439e18-ac2e-4827-b049-7e18004712c4')
     def test_list_images_with_detail_filter_by_changes_since(self):
-        # Verify an update image is returned
+        """Test listing images details filtered by changes-since
 
+        The list of images should contain only images updated since the
+        provided changes-since value.
+        """
         # Becoming ACTIVE will modify the updated time
         # Filter by the image's created time
         params = {'changes-since': self.image1['created']}
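
The six.BytesIO call dropped above maps directly onto the standard library. A minimal sketch of the Python 3 replacement used by the updated test (the payload value is illustrative):

    import io

    # Build the same kind of 1 KiB in-memory image payload the test uploads.
    image_file = io.BytesIO(b'*' * 1024)
    assert image_file.read(4) == b'****'
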
diff --git a/tempest/api/compute/images/test_list_image_filters_negative.py b/tempest/api/compute/images/test_list_image_filters_negative.py
index 81c59f9..f77da4b 100644
--- a/tempest/api/compute/images/test_list_image_filters_negative.py
+++ b/tempest/api/compute/images/test_list_image_filters_negative.py
@@ -22,6 +22,12 @@
 
 
 class ListImageFiltersNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of listing images using compute images API
+
+    Negative tests of listing images using compute images API with
+    microversion less than 2.36.
+    """
+
     max_microversion = '2.35'
 
     @classmethod
@@ -39,7 +45,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('391b0440-432c-4d4b-b5da-c5096aa247eb')
     def test_get_nonexistent_image(self):
-        # Check raises a NotFound
+        """Test getting a non existent image should fail"""
         nonexistent_image = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound, self.client.show_image,
                           nonexistent_image)
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 59848f6..3c4daf6 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -35,16 +35,16 @@
         cls.from_port = 22
         cls.to_port = 22
 
-    def setUp(cls):
-        super(SecurityGroupRulesTestJSON, cls).setUp()
+    def setUp(self):
+        super(SecurityGroupRulesTestJSON, self).setUp()
 
-        from_port = cls.from_port
-        to_port = cls.to_port
+        from_port = self.from_port
+        to_port = self.to_port
         group = {}
         ip_range = {}
-        cls.expected = {
+        self.expected = {
             'parent_group_id': None,
-            'ip_protocol': cls.ip_protocol,
+            'ip_protocol': self.ip_protocol,
             'from_port': from_port,
             'to_port': to_port,
             'ip_range': ip_range,
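
The setUp fix above only renames the parameter the instance is bound to; the old spelling happened to work because Python binds the instance to the first positional parameter of an instance method regardless of its name. A minimal sketch of that behaviour (class and attribute names are illustrative):

    class Sketch:
        def setUp(cls):
            # Misleading name: 'cls' is still the instance here.
            cls.expected = {}

    s = Sketch()
    s.setUp()
    assert s.expected == {}
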
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 0601bbe..ac18442 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -16,7 +16,6 @@
 import time
 
 from oslo_log import log
-import six
 
 from tempest.api.compute import base
 from tempest.common import compute
@@ -241,7 +240,7 @@
         except lib_exc.BadRequest as e:
             msg = ('Multiple possible networks found, use a Network ID to be '
                    'more specific.')
-            if not CONF.compute.fixed_network_name and six.text_type(e) == msg:
+            if not CONF.compute.fixed_network_name and str(e) == msg:
                 raise
         else:
             ifs.append(iface)
@@ -427,3 +426,33 @@
                 CONF.compute.build_interval, original_ip_count):
             raise lib_exc.TimeoutException(
                 'Timed out while waiting for IP count to decrease.')
+
+
+class AttachInterfacesV270Test(AttachInterfacesTestBase):
+    """Test interface API with microversion greater than 2.69"""
+    min_microversion = '2.70'
+
+    @decorators.idempotent_id('2853f095-8277-4067-92bd-9f10bd4f8e0c')
+    @utils.services('network')
+    def test_create_get_list_interfaces(self):
+        """Test interface API with microversion greater than 2.69
+
+        Checking create, get, list interface APIs response schema.
+        """
+        server = self.create_test_server(wait_until='ACTIVE')
+        try:
+            iface = self.interfaces_client.create_interface(server['id'])[
+                'interfaceAttachment']
+            iface = waiters.wait_for_interface_status(
+                self.interfaces_client, server['id'], iface['port_id'],
+                'ACTIVE')
+        except lib_exc.BadRequest as e:
+            msg = ('Multiple possible networks found, use a Network ID to be '
+                   'more specific.')
+            if not CONF.compute.fixed_network_name and str(e) == msg:
+                raise
+        else:
+            # just to check the response schema
+            self.interfaces_client.show_interface(
+                server['id'], iface['port_id'])
+            self.interfaces_client.list_interfaces(server['id'])
diff --git a/tempest/api/compute/servers/test_create_server_multi_nic.py b/tempest/api/compute/servers/test_create_server_multi_nic.py
index d0f53fe..bd3f58d 100644
--- a/tempest/api/compute/servers/test_create_server_multi_nic.py
+++ b/tempest/api/compute/servers/test_create_server_multi_nic.py
@@ -24,6 +24,7 @@
 
 
 class ServersTestMultiNic(base.BaseV2ComputeTest):
+    """Test multiple networks in servers"""
 
     @classmethod
     def skip_checks(cls):
@@ -59,8 +60,11 @@
 
     @decorators.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
     def test_verify_multiple_nics_order(self):
-        # Verify that the networks order given at the server creation is
-        # preserved within the server.
+        """Test verifying multiple networks order in server
+
+        The order of the networks given at server creation is preserved
+        within the server.
+        """
         net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
         net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
 
@@ -95,6 +99,12 @@
 
     @decorators.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
     def test_verify_duplicate_network_nics(self):
+        """Test multiple duplicate networks can be used to create server
+
+        When creating a server with networks [net1, net2, net1], the
+        server is created successfully and all three networks appear in
+        the server addresses.
+        """
         # Verify that server creation does not fail when more than one nic
         # is created on the same network.
         net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index a7e2187..58d4d7d 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -338,7 +338,9 @@
         found_devices = [d['tags'][0] for d in md_dict['devices']
                          if d.get('tags')]
         try:
-            self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+            self.assertEqual(
+                sorted(found_devices),
+                sorted(['nic-tag', 'volume-tag']))
             return True
         except Exception:
             return False
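
The sorted() comparison above is an order-insensitive replacement for the Python 2-era assertItemsEqual, which no longer exists in Python 3's unittest. A minimal sketch of the equivalence (assertCountEqual is the Python 3 spelling of the old assertion, shown only for comparison):

    import unittest

    class TagComparisonSketch(unittest.TestCase):
        def test_order_insensitive(self):
            found_devices = ['volume-tag', 'nic-tag']
            # Same check as the diff: equal once both sides are sorted.
            self.assertEqual(sorted(found_devices),
                             sorted(['nic-tag', 'volume-tag']))
            # Python 3 name for the old assertItemsEqual.
            self.assertCountEqual(found_devices, ['nic-tag', 'volume-tag'])
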
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index b95db5c..3d55696 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -20,6 +20,8 @@
 
 
 class ListServersNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of listing servers"""
+
     create_default_network = True
 
     @classmethod
@@ -45,7 +47,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f')
     def test_list_servers_with_a_deleted_server(self):
-        # Verify deleted servers do not show by default in list servers
+        """Test that deleted servers do not show by default in list servers"""
         # List servers and verify server not returned
         body = self.client.list_servers()
         servers = body['servers']
@@ -56,7 +58,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('ff01387d-c7ad-47b4-ae9e-64fa214638fe')
     def test_list_servers_by_non_existing_image(self):
-        # Listing servers for a non existing image returns empty list
+        """Test listing servers for a non existing image returns empty list"""
         body = self.client.list_servers(image='non_existing_image')
         servers = body['servers']
         self.assertEmpty(servers)
@@ -64,7 +66,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5913660b-223b-44d4-a651-a0fbfd44ca75')
     def test_list_servers_by_non_existing_flavor(self):
-        # Listing servers by non existing flavor returns empty list
+        """Test listing servers by non existing flavor returns empty list"""
         body = self.client.list_servers(flavor='non_existing_flavor')
         servers = body['servers']
         self.assertEmpty(servers)
@@ -72,7 +74,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e2c77c4a-000a-4af3-a0bd-629a328bde7c')
     def test_list_servers_by_non_existing_server_name(self):
-        # Listing servers for a non existent server name returns empty list
+        """Test listing servers for a non existent server name
+
+        Listing servers for a non existent server name should return empty
+        list.
+        """
+
         body = self.client.list_servers(name='non_existing_server_name')
         servers = body['servers']
         self.assertEmpty(servers)
@@ -80,9 +87,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fcdf192d-0f74-4d89-911f-1ec002b822c4')
     def test_list_servers_status_non_existing(self):
-        # When invalid status is specified, up to microversion 2.37,
-        # an empty list is returned, and starting from microversion 2.38,
-        # a 400 error is returned in that case.
+        """Test listing servers with non existing status
+
+        When invalid status is specified, up to microversion 2.37,
+        an empty list is returned, and starting from microversion 2.38,
+        a 400 error is returned in that case.
+        """
+
         if self.is_requested_microversion_compatible('2.37'):
             body = self.client.list_servers(status='non_existing_status')
             servers = body['servers']
@@ -94,6 +105,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d47c17fb-eebd-4287-8e95-f20a7e627b18')
     def test_list_servers_by_limits_greater_than_actual_count(self):
+        """Test listing servers by limit greater than actual count
+
+        Listing servers by limit greater than actual count should return
+        all servers.
+        """
+
         # Gather the complete list of servers in the project for reference
         full_list = self.client.list_servers()['servers']
         # List servers by specifying a greater value for limit
@@ -104,21 +121,21 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('679bc053-5e70-4514-9800-3dfab1a380a6')
     def test_list_servers_by_limits_pass_string(self):
-        # Return an error if a string value is passed for limit
+        """Test listing servers by non-integer limit should fail"""
         self.assertRaises(lib_exc.BadRequest, self.client.list_servers,
                           limit='testing')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('62610dd9-4713-4ee0-8beb-fd2c1aa7f950')
     def test_list_servers_by_limits_pass_negative_value(self):
-        # Return an error if a negative value for limit is passed
+        """Test listing servers by negative limit should fail"""
         self.assertRaises(lib_exc.BadRequest, self.client.list_servers,
                           limit=-1)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('87d12517-e20a-4c9c-97b6-dd1628d6d6c9')
     def test_list_servers_by_changes_since_invalid_date(self):
-        # Return an error when invalid date format is passed
+        """Test listing servers by invalid changes-since format should fail"""
         params = {'changes-since': '2011/01/01'}
         self.assertRaises(lib_exc.BadRequest, self.client.list_servers,
                           **params)
@@ -126,7 +143,12 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('74745ad8-b346-45b5-b9b8-509d7447fc1f')
     def test_list_servers_by_changes_since_future_date(self):
-        # Return an empty list when a date in the future is passed.
+        """Test listing servers by a future changes-since date
+
+        Return an empty list when a date in the future is passed as
+        changes-since value.
+        """
+
         # updated_at field may haven't been set at the point in the boot
         # process where build_request still exists, so add
         # {'status': 'ACTIVE'} along with changes-since as filter.
@@ -138,7 +160,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('93055106-2d34-46fe-af68-d9ddbf7ee570')
     def test_list_servers_detail_server_is_deleted(self):
-        # Server details are not listed for a deleted server
+        """Test listing servers detail should not contain deleted server"""
         body = self.client.list_servers(detail=True)
         servers = body['servers']
         actual = [srv for srv in servers
diff --git a/tempest/api/compute/servers/test_multiple_create.py b/tempest/api/compute/servers/test_multiple_create.py
index dcadace..10c76bb 100644
--- a/tempest/api/compute/servers/test_multiple_create.py
+++ b/tempest/api/compute/servers/test_multiple_create.py
@@ -19,11 +19,15 @@
 
 
 class MultipleCreateTestJSON(base.BaseV2ComputeTest):
+    """Test creating multiple servers in one request"""
     create_default_network = True
 
     @decorators.idempotent_id('61e03386-89c3-449c-9bb1-a06f423fd9d1')
     def test_multiple_create(self):
-        # Creating server with min_count=2, 2 servers will be created.
+        """Test creating multiple servers in one request
+
+        When creating a server with min_count=2, 2 servers will be created.
+        """
         tenant_network = self.get_tenant_network()
         body, servers = compute.create_test_server(
             self.os_primary,
@@ -40,8 +44,12 @@
 
     @decorators.idempotent_id('864777fb-2f1e-44e3-b5b9-3eb6fa84f2f7')
     def test_multiple_create_with_reservation_return(self):
-        # Creating multiple servers with return_reservation_id=True,
-        # reservation_id will be returned.
+        """Test creating multiple servers with return_reservation_id=True
+
+        When creating multiple servers with return_reservation_id=True,
+        a reservation_id will be returned.
+        """
+
         body = self.create_test_server(wait_until='ACTIVE',
                                        min_count=1,
                                        max_count=2,
diff --git a/tempest/api/compute/servers/test_multiple_create_negative.py b/tempest/api/compute/servers/test_multiple_create_negative.py
index 6bdf83b..3a970dd 100644
--- a/tempest/api/compute/servers/test_multiple_create_negative.py
+++ b/tempest/api/compute/servers/test_multiple_create_negative.py
@@ -19,11 +19,12 @@
 
 
 class MultipleCreateNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of creating multiple servers in one request"""
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('daf29d8d-e928-4a01-9a8c-b129603f3fc0')
     def test_min_count_less_than_one(self):
-        # Creating server with min_count=0 should fail.
+        """Test creating server with min_count=0 should fail"""
         invalid_min_count = 0
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           min_count=invalid_min_count)
@@ -31,7 +32,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('999aa722-d624-4423-b813-0d1ac9884d7a')
     def test_min_count_non_integer(self):
-        # Creating server with non-integer min_count should fail.
+        """Test creating server with non-integer min_count should fail"""
         invalid_min_count = 2.5
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           min_count=invalid_min_count)
@@ -39,7 +40,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
     def test_max_count_less_than_one(self):
-        # Creating server with max_count < 1 shoudld fail.
+        """Test creating server with max_count < 1 shoudld fail"""
         invalid_max_count = 0
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           max_count=invalid_max_count)
@@ -47,7 +48,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9c5698d1-d7af-4c80-b971-9d403135eea2')
     def test_max_count_non_integer(self):
-        # Creating server with non-integer max_count should fail.
+        """Test creating server with non-integer max_count should fail"""
         invalid_max_count = 2.5
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           max_count=invalid_max_count)
@@ -55,7 +56,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('476da616-f1ef-4271-a9b1-b9fc87727cdf')
     def test_max_count_less_than_min_count(self):
-        # Creating server with max_count less than min_count should fail.
+        """Test creating server with max_count < min_count should fail"""
         min_count = 3
         max_count = 2
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 7931ca9..1308b19 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -14,9 +14,7 @@
 #    under the License.
 
 import struct
-
-import six
-import six.moves.urllib.parse as urlparse
+import urllib.parse as urlparse
 import urllib3
 
 from tempest.api.compute import base
@@ -26,11 +24,6 @@
 
 CONF = config.CONF
 
-if six.PY2:
-    ord_func = ord
-else:
-    ord_func = int
-
 
 class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
     """Test novnc console"""
@@ -74,7 +67,7 @@
         resp = urllib3.PoolManager().request('GET', vnc_url)
         # Make sure that the GET request was accepted by the novncproxy
         self.assertEqual(resp.status, 200, 'Got a Bad HTTP Response on the '
-                         'initial call: ' + six.text_type(resp.status))
+                         'initial call: ' + str(resp.status))
         # Do some basic validation to make sure it is an expected HTML document
         resp_data = resp.data.decode()
         # This is needed in the case of example: <html lang="en">
@@ -116,18 +109,18 @@
             # single word(4 bytes).
             self.assertEqual(
                 data_length, 4, 'Expected authentication type None.')
-            self.assertIn(1, [ord_func(data[i]) for i in (0, 3)],
+            self.assertIn(1, [int(data[i]) for i in (0, 3)],
                           'Expected authentication type None.')
         else:
             self.assertGreaterEqual(
                 len(data), 2, 'Expected authentication type None.')
             self.assertIn(
                 1,
-                [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
+                [int(data[i + 1]) for i in range(int(data[0]))],
                 'Expected authentication type None.')
             # Send to the server that we only support authentication
             # type None
-            self._websocket.send_frame(six.int2byte(1))
+            self._websocket.send_frame(bytes((1,)))
 
             # The server should send 4 bytes of 0's if security
             # handshake succeeded
@@ -136,11 +129,11 @@
                 len(data), 4,
                 'Server did not think security was successful.')
             self.assertEqual(
-                [ord_func(i) for i in data], [0, 0, 0, 0],
+                [int(i) for i in data], [0, 0, 0, 0],
                 'Server did not think security was successful.')
 
         # Say to leave the desktop as shared as part of client initialization
-        self._websocket.send_frame(six.int2byte(1))
+        self._websocket.send_frame(bytes((1,)))
         # Get the server initialization packet back and make sure it is the
         # right structure where bytes 20-24 is the name length and
         # 24-N is the name
@@ -170,11 +163,11 @@
             self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
                                                 b'Protocols'),
             'Incorrect HTTP return status code: {}'.format(
-                six.text_type(self._websocket.response)
+                str(self._websocket.response)
             )
         )
         _required_header = 'upgrade: websocket'
-        _response = six.text_type(self._websocket.response).lower()
+        _response = str(self._websocket.response).lower()
         self.assertIn(
             _required_header,
             _response,
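
A minimal sketch of the Python 3 byte handling that replaces six.int2byte and the ord_func helper above (the byte values are illustrative):

    # bytes((1,)) builds the single byte b'\x01', which is what
    # six.int2byte(1) used to return.
    frame = bytes((1,))
    assert frame == b'\x01'

    # Indexing a bytes object in Python 3 already yields ints, so the
    # int(...) calls simply keep the old ord_func call sites readable.
    data = b'\x00\x00\x00\x00'
    assert [int(b) for b in data] == [0, 0, 0, 0]
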
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index d477be0..deb21c7 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urlparse
+
 from oslo_log import log as logging
-from six.moves.urllib import parse as urlparse
 import testtools
 
 from tempest.api.compute import base
@@ -34,6 +35,8 @@
 
 
 class ServerActionsTestJSON(base.BaseV2ComputeTest):
+    """Test server actions"""
+
     def setUp(self):
         # NOTE(afazekas): Normally we use the same server with all test cases,
         # but if it has an issue, we build a new one
@@ -84,6 +87,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.change_password,
                           'Change password not available.')
     def test_change_server_password(self):
+        """Test changing server's password
+
+        The server's password should be set to the provided password and
+        the user should be able to authenticate with the new password.
+        """
         # Since this test messes with the password and makes the
         # server unreachable, it should create its own server
         validation_resources = self.get_test_validation_resources(
@@ -147,17 +155,15 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32')
     def test_reboot_server_hard(self):
-        # The server should be power cycled
-        self._test_reboot_server('HARD')
+        """Test hard rebooting server
 
-    @decorators.skip_because(bug="1014647")
-    @decorators.idempotent_id('4640e3ef-a5df-482e-95a1-ceeeb0faa84d')
-    def test_reboot_server_soft(self):
-        # The server should be signaled to reboot gracefully
-        self._test_reboot_server('SOFT')
+        The server should be power cycled.
+        """
+        self._test_reboot_server('HARD')
 
     @decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
     def test_remove_server_all_security_groups(self):
+        """Test removing all security groups from server"""
         server = self.create_test_server(wait_until='ACTIVE')
 
         # Remove all Security group
@@ -223,7 +229,7 @@
             # 4.Plain username/password auth, if a password was given.
             linux_client = remote_client.RemoteClient(
                 self.get_server_ip(rebuilt_server, validation_resources),
-                self.ssh_user,
+                self.ssh_alt_user,
                 password,
                 validation_resources['keypair']['private_key'],
                 server=rebuilt_server,
@@ -232,12 +238,19 @@
 
     @decorators.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
     def test_rebuild_server(self):
+        """Test rebuilding server
+
+        The server should be rebuilt using the provided image and data.
+        """
         self._test_rebuild_server()
 
     @decorators.idempotent_id('30449a88-5aff-4f9b-9866-6ee9b17f906d')
     def test_rebuild_server_in_stop_state(self):
-        # The server in stop state  should be rebuilt using the provided
-        # image and remain in SHUTOFF state
+        """Test rebuilding server in stop state
+
+        The server in stop state should be rebuilt using the provided
+        image and remain in SHUTOFF state.
+        """
         server = self.client.show_server(self.server_id)['server']
         old_image = server['image']['id']
         new_image = (self.image_ref_alt
@@ -274,6 +287,10 @@
     @decorators.idempotent_id('b68bd8d6-855d-4212-b59b-2e704044dace')
     @utils.services('volume')
     def test_rebuild_server_with_volume_attached(self):
+        """Test rebuilding server with volume attached
+
+        The volume should be attached to the instance after rebuild.
+        """
         # create a new volume and attach it to the server
         volume = self.create_volume()
 
@@ -294,7 +311,7 @@
                 self.os_primary)
             linux_client = remote_client.RemoteClient(
                 self.get_server_ip(server, validation_resources),
-                self.ssh_user,
+                self.ssh_alt_user,
                 password=None,
                 pkey=validation_resources['keypair']['private_key'],
                 server=server,
@@ -333,6 +350,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_confirm(self):
+        """Test resizing server and then confirming"""
         self._test_resize_server_confirm(self.server_id, stop=False)
 
     @decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
@@ -341,6 +359,7 @@
                           'Resize not available.')
     @utils.services('volume')
     def test_resize_volume_backed_server_confirm(self):
+        """Test resizing a volume backed server and then confirming"""
         # We have to create a new server that is volume-backed since the one
         # from setUp is not volume-backed.
         kwargs = {'volume_backed': True,
@@ -377,14 +396,18 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_confirm_from_stopped(self):
+        """Test resizing a stopped server and then confirming"""
         self._test_resize_server_confirm(self.server_id, stop=True)
 
     @decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
     @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                           'Resize not available.')
     def test_resize_server_revert(self):
-        # The server's RAM and disk space should return to its original
-        # values after a resize is reverted
+        """Test resizing server and then reverting
+
+        The server's RAM and disk space should return to its original
+        values after a resize is reverted.
+        """
 
         self.client.resize_server(self.server_id, self.flavor_ref_alt)
         # NOTE(zhufl): Explicitly delete the server to get a new one for later
@@ -405,10 +428,13 @@
                           'Resize not available.')
     @utils.services('volume')
     def test_resize_server_revert_with_volume_attached(self):
-        # Tests attaching a volume to a server instance and then resizing
-        # the instance. Once the instance is resized, revert the resize which
-        # should move the instance and volume attachment back to the original
-        # compute host.
+        """Test resizing a volume attached server and then reverting
+
+        Tests attaching a volume to a server instance and then resizing
+        the instance. Once the instance is resized, revert the resize which
+        should move the instance and volume attachment back to the original
+        compute host.
+        """
 
         # Create a blank volume and attach it to the server created in setUp.
         volume = self.create_volume()
@@ -437,7 +463,14 @@
                           'Snapshotting not available, backup not possible.')
     @utils.services('image')
     def test_create_backup(self):
-        # Positive test:create backup successfully and rotate backups correctly
+        """Test creating server backup
+
+        1. Create server backup1 with rotation=2; there is 1 backup.
+        2. Create server backup2 with rotation=2; there are 2 backups.
+        3. Create server backup3; because the rotation is 2, the first one
+           (backup1) is deleted, so there are still 2 backups.
+        """
+
         # create the first and the second backup
 
         # Check if glance v1 is available to determine which client to use. We
@@ -563,8 +596,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output(self):
-        # Positive test:Should be able to GET the console output
-        # for a given server_id and number of lines
+        """Test getting console output for a server
+
+        Should be able to GET the console output for a given server_id and
+        number of lines.
+        """
 
         # This reboot is necessary for outputting some console log after
         # creating an instance backup. If an instance backup, the console
@@ -579,6 +615,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output_with_unlimited_size(self):
+        """Test getting server's console output with unlimited size
+
+        The number of lines in the console output should be bigger than
+        that of test_get_console_output.
+        """
         server = self.create_test_server(wait_until='ACTIVE')
 
         def _check_full_length_console_log():
@@ -597,8 +638,11 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
                           'Console output not supported.')
     def test_get_console_output_server_id_in_shutoff_status(self):
-        # Positive test:Should be able to GET the console output
-        # for a given server_id in SHUTOFF status
+        """Test getting console output for a server in SHUTOFF status
+
+        Should be able to GET the console output for a given server_id
+        in SHUTOFF status.
+        """
 
         # NOTE: SHUTOFF is irregular status. To avoid test instability,
         #       one server is created only for this test without using
@@ -614,6 +658,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_pause_unpause_server(self):
+        """Test pausing and unpausing server"""
         self.client.pause_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
         self.client.unpause_server(self.server_id)
@@ -623,6 +668,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                           'Suspend is not available.')
     def test_suspend_resume_server(self):
+        """Test suspending and resuming server"""
         self.client.suspend_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id,
                                        'SUSPENDED')
@@ -634,6 +680,7 @@
                           'Shelve is not available.')
     @utils.services('image')
     def test_shelve_unshelve_server(self):
+        """Test shelving and unshelving server"""
         if CONF.image_feature_enabled.api_v2:
             glance_client = self.os_primary.image_client_v2
         elif CONF.image_feature_enabled.api_v1:
@@ -673,6 +720,7 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                           'Pause is not available.')
     def test_shelve_paused_server(self):
+        """Test shelving a paused server"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.client.pause_server(server['id'])
         waiters.wait_for_server_status(self.client, server['id'], 'PAUSED')
@@ -682,6 +730,7 @@
 
     @decorators.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
     def test_stop_start_server(self):
+        """Test stopping and starting server"""
         self.client.stop_server(self.server_id)
         waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
         self.client.start_server(self.server_id)
@@ -689,6 +738,12 @@
 
     @decorators.idempotent_id('80a8094c-211e-440a-ab88-9e59d556c7ee')
     def test_lock_unlock_server(self):
+        """Test locking and unlocking server
+
+        Lock the server, and trying to stop it will fail because a locked
+        server is not allowed to be stopped by a non-admin user.
+        Then unlock the server; it can now be stopped and started.
+        """
         # Lock the server,try server stop(exceptions throw),unlock it and retry
         self.client.lock_server(self.server_id)
         self.addCleanup(self.client.unlock_server, self.server_id)
@@ -714,6 +769,10 @@
     @testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
                           'VNC Console feature is disabled.')
     def test_get_vnc_console(self):
+        """Test getting vnc console from a server
+
+        The returned vnc console url should be in valid format.
+        """
         if self.is_requested_microversion_compatible('2.5'):
             body = self.client.get_vnc_console(
                 self.server_id, type='novnc')['console']
diff --git a/tempest/api/compute/servers/test_server_addresses.py b/tempest/api/compute/servers/test_server_addresses.py
index c936ce5..5a3f5d0 100644
--- a/tempest/api/compute/servers/test_server_addresses.py
+++ b/tempest/api/compute/servers/test_server_addresses.py
@@ -19,6 +19,7 @@
 
 
 class ServerAddressesTestJSON(base.BaseV2ComputeTest):
+    """Test server addresses"""
     create_default_network = True
 
     @classmethod
@@ -36,8 +37,10 @@
     @decorators.idempotent_id('6eb718c0-02d9-4d5e-acd1-4e0c269cef39')
     @utils.services('network')
     def test_list_server_addresses(self):
-        # All public and private addresses for
-        # a server should be returned
+        """Test listing server address
+
+        All public and private addresses for a server should be returned.
+        """
 
         addresses = self.client.list_addresses(self.server['id'])['addresses']
 
@@ -51,8 +54,11 @@
     @decorators.idempotent_id('87bbc374-5538-4f64-b673-2b0e4443cc30')
     @utils.services('network')
     def test_list_server_addresses_by_network(self):
-        # Providing a network type should filter
-        # the addresses return by that type
+        """Test listing server addresses filtered by network addresses
+
+        Providing a network address should filter the returned addresses
+        to those matching the specified one.
+        """
 
         addresses = self.client.list_addresses(self.server['id'])['addresses']
 
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 9d87e1c..9f93e76 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -14,13 +14,26 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib import decorators
 
+CONF = config.CONF
 
+
+# TODO(stephenfin): Remove these tests once the nova Ussuri branch goes EOL
 class ServerMetadataTestJSON(base.BaseV2ComputeTest):
+    """Test server metadata"""
+
     create_default_network = True
 
     @classmethod
+    def skip_checks(cls):
+        super(ServerMetadataTestJSON, cls).skip_checks()
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise cls.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
+    @classmethod
     def setup_clients(cls):
         super(ServerMetadataTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
@@ -37,7 +50,10 @@
 
     @decorators.idempotent_id('479da087-92b3-4dcf-aeb3-fd293b2d14ce')
     def test_list_server_metadata(self):
-        # All metadata key/value pairs for a server should be returned
+        """Test listing server metadata
+
+        All metadata key/value pairs for a server should be returned.
+        """
         resp_metadata = (self.client.list_server_metadata(self.server['id'])
                          ['metadata'])
 
@@ -47,7 +63,10 @@
 
     @decorators.idempotent_id('211021f6-21de-4657-a68f-908878cfe251')
     def test_set_server_metadata(self):
-        # The server's metadata should be replaced with the provided values
+        """Test setting server metadata
+
+        The server's metadata should be replaced with the provided values.
+        """
         # Create a new set of metadata for the server
         req_metadata = {'meta2': 'data2', 'meta3': 'data3'}
         self.client.set_server_metadata(self.server['id'], req_metadata)
@@ -60,8 +79,10 @@
 
     @decorators.idempotent_id('344d981e-0c33-4997-8a5d-6c1d803e4134')
     def test_update_server_metadata(self):
-        # The server's metadata values should be updated to the
-        # provided values
+        """Test updating server metadata
+
+        The server's metadata values should be updated to the provided values.
+        """
         meta = {'key1': 'alt1', 'key3': 'value3'}
         self.client.update_server_metadata(self.server['id'], meta)
 
@@ -73,8 +94,11 @@
 
     @decorators.idempotent_id('0f58d402-e34a-481d-8af8-b392b17426d9')
     def test_update_metadata_empty_body(self):
-        # The original metadata should not be lost if empty metadata body is
-        # passed
+        """Test updating server metadata to empty values
+
+        The original server metadata should not be lost if empty metadata
+        body is passed.
+        """
         meta = {}
         self.client.update_server_metadata(self.server['id'], meta)
         resp_metadata = (self.client.list_server_metadata(self.server['id'])
@@ -84,15 +108,19 @@
 
     @decorators.idempotent_id('3043c57d-7e0e-49a6-9a96-ad569c265e6a')
     def test_get_server_metadata_item(self):
-        # The value for a specific metadata key should be returned
+        """Test getting specific server metadata item"""
         meta = self.client.show_server_metadata_item(self.server['id'],
                                                      'key2')['meta']
         self.assertEqual('value2', meta['key2'])
 
     @decorators.idempotent_id('58c02d4f-5c67-40be-8744-d3fa5982eb1c')
     def test_set_server_metadata_item(self):
-        # The item's value should be updated to the provided value
-        # Update the metadata value
+        """Test updating specific server metadata item
+
+        The metadata item's value should be updated to the provided value.
+        """
+
+        # Update the metadata value.
         meta = {'nova': 'alt'}
         self.client.set_server_metadata_item(self.server['id'], 'nova', meta)
 
@@ -104,7 +132,10 @@
 
     @decorators.idempotent_id('127642d6-4c7b-4486-b7cd-07265a378658')
     def test_delete_server_metadata_item(self):
-        # The metadata value/key pair should be deleted from the server
+        """Test deleting server metadata item
+
+        The metadata value/key pair should be deleted from the server.
+        """
         self.client.delete_server_metadata_item(self.server['id'], 'key1')
 
         # Verify the metadata item has been removed
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 5688af1..655909c 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -14,12 +14,17 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
+CONF = config.CONF
+
 
 class ServerMetadataNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of server metadata"""
+
     create_default_network = True
 
     @classmethod
@@ -36,10 +41,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('fe114a8f-3a57-4eff-9ee2-4e14628df049')
     def test_server_create_metadata_key_too_long(self):
+        """Test creating server with too long metadata key should fail"""
         # Attempt to start a server with a meta-data key that is > 255
         # characters
 
-        # Tryset_server_metadata_item a few values
+        # Try to create a server with metadata keys of a few sizes
         for sz in [256, 257, 511, 1023]:
             key = "k" * sz
             meta = {key: 'data1'}
@@ -52,7 +58,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('92431555-4d8b-467c-b95b-b17daa5e57ff')
     def test_create_server_metadata_blank_key(self):
-        # Blank key should trigger an error.
+        """Test creating server with blank metadata key should fail"""
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.create_test_server,
@@ -61,6 +67,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('4d9cd7a3-2010-4b41-b8fe-3bbf0b169466')
     def test_server_metadata_non_existent_server(self):
+        """Test getting metadata item for a non existent server should fail"""
         # GET on a non-existent server should not succeed
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
@@ -71,7 +78,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('f408e78e-3066-4097-9299-3b0182da812e')
     def test_list_server_metadata_non_existent_server(self):
-        # List metadata on a non-existent server should not succeed
+        """Test listing metadata for a non existent server should fail"""
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.list_server_metadata,
@@ -79,9 +86,15 @@
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0025fbd6-a4ba-4cde-b8c2-96805dcfdabc')
-    def test_wrong_key_passed_in_body(self):
-        # Raise BadRequest if key in uri does not match
-        # the key passed in body.
+    def test_set_metadata_invalid_key(self):
+        """Test setting server metadata item with wrong key in body
+
+        Raise BadRequest if key in uri does not match the key passed in body.
+        """
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'testkey': 'testvalue'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata_item,
@@ -90,7 +103,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0df38c2a-3d4e-4db5-98d8-d4d9fa843a12')
     def test_set_metadata_non_existent_server(self):
-        # Set metadata on a non-existent server should not succeed
+        """Test setting metadata for a non existent server should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         non_existent_server_id = data_utils.rand_uuid()
         meta = {'meta1': 'data1'}
         self.assertRaises(lib_exc.NotFound,
@@ -101,7 +118,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('904b13dc-0ef2-4e4c-91cd-3b4a0f2f49d8')
     def test_update_metadata_non_existent_server(self):
-        # An update should not happen for a non-existent server
+        """Test updating metadata for a non existent server should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         non_existent_server_id = data_utils.rand_uuid()
         meta = {'key1': 'value1', 'key2': 'value2'}
         self.assertRaises(lib_exc.NotFound,
@@ -112,7 +133,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a452f38c-05c2-4b47-bd44-a4f0bf5a5e48')
     def test_update_metadata_with_blank_key(self):
-        # Blank key should trigger an error
+        """Test updating server metadata to blank key should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_server_metadata,
@@ -121,7 +146,14 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6bbd88e1-f8b3-424d-ba10-ae21c45ada8d')
     def test_delete_metadata_non_existent_server(self):
-        # Should not be able to delete metadata item from a non-existent server
+        """Test deleting metadata item from a non existent server
+
+        Should not be able to delete metadata item from a non-existent server.
+        """
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_server_metadata_item,
@@ -131,9 +163,15 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('d8c0a210-a5c3-4664-be04-69d96746b547')
     def test_metadata_items_limit(self):
-        # A 403 Forbidden or 413 Overlimit (old behaviour) exception
-        # will be raised while exceeding metadata items limit for
-        # tenant.
+        """Test set/update server metadata over limit should fail
+
+        A 403 Forbidden or 413 Overlimit (old behaviour) exception
+        will be raised when exceeding the project's metadata items limit.
+        """
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
         quota_metadata = quota_set['metadata_items']
@@ -157,8 +195,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('96100343-7fa9-40d8-80fa-d29ef588ce1c')
     def test_set_server_metadata_blank_key(self):
-        # Raise a bad request error for blank key.
-        # set_server_metadata will replace all metadata with new value
+        """Test setting server metadata with blank key should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata,
@@ -167,8 +208,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('64a91aee-9723-4863-be44-4c9d9f1e7d0e')
     def test_set_server_metadata_missing_metadata(self):
-        # Raise a bad request error for a missing metadata field
-        # set_server_metadata will replace all metadata with new value
+        """Test setting server metadata without metadata field should fail"""
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'meta1': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata,
diff --git a/tempest/api/compute/servers/test_server_password.py b/tempest/api/compute/servers/test_server_password.py
index 7b31ede..f61d4fd 100644
--- a/tempest/api/compute/servers/test_server_password.py
+++ b/tempest/api/compute/servers/test_server_password.py
@@ -19,6 +19,8 @@
 
 
 class ServerPasswordTestJSON(base.BaseV2ComputeTest):
+    """Test server password"""
+
     create_default_network = True
 
     @classmethod
@@ -28,8 +30,10 @@
 
     @decorators.idempotent_id('f83b582f-62a8-4f22-85b0-0dee50ff783a')
     def test_get_server_password(self):
+        """Test getting password of a server"""
         self.servers_client.show_password(self.server['id'])
 
     @decorators.idempotent_id('f8229e8b-b625-4493-800a-bde86ac611ea')
     def test_delete_server_password(self):
+        """Test deleting password from a server"""
         self.servers_client.delete_password(self.server['id'])
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index ba2adbb..8a05e7a 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -29,6 +29,7 @@
 
 class ServerPersonalityTestJSON(base.BaseV2ComputeTest):
     """Test servers with injected files"""
+    max_microversion = '2.56'
 
     @classmethod
     def setup_credentials(cls):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 5445113..354e3b9 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -157,16 +158,28 @@
             self.servers_client, server_id, 'ACTIVE')
 
 
-class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
-    """Test rescuing server specifying type of device for the rescue disk"""
+class ServerStableDeviceRescueTestIDE(BaseServerStableDeviceRescueTest):
+    """Test rescuing server using an IDE device for the rescue disk"""
+
+    @classmethod
+    def skip_checks(cls):
+        super().skip_checks()
+        if not CONF.compute_feature_enabled.ide_bus:
+            raise cls.skipException("IDE bus not available.")
 
     @decorators.idempotent_id('947004c3-e8ef-47d9-9f00-97b74f9eaf96')
+    @testtools.skipIf("aarch64" in CONF.scenario.img_file,
+                      "Aarch64 does not support ide bus for cdrom")
     def test_stable_device_rescue_cdrom_ide(self):
         """Test rescuing server with cdrom and ide as the rescue disk"""
         server_id, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='cdrom', hw_rescue_bus='ide')
         self._test_stable_device_rescue(server_id, rescue_image_id)
 
+
+class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
+    """Test rescuing server specifying type of device for the rescue disk"""
+
     @decorators.idempotent_id('16865750-1417-4854-bcf7-496e6753c01e')
     def test_stable_device_rescue_disk_virtio(self):
         """Test rescuing server with disk and virtio as the rescue disk"""
@@ -189,6 +202,7 @@
         self._test_stable_device_rescue(server_id, rescue_image_id)
 
     @decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
+    @utils.services('volume')
     def test_stable_device_rescue_disk_virtio_with_volume_attached(self):
         """Test rescuing server with volume attached
 
@@ -214,6 +228,13 @@
 
     min_microversion = '2.87'
 
+    @classmethod
+    def skip_checks(cls):
+        super(ServerBootFromVolumeStableRescueTest, cls).skip_checks()
+        if not CONF.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
     @decorators.attr(type='slow')
     @decorators.idempotent_id('48f123cb-922a-4065-8db6-b9a9074a556b')
     def test_stable_device_rescue_bfv_blank_volume(self):
diff --git a/tempest/api/compute/servers/test_server_tags.py b/tempest/api/compute/servers/test_server_tags.py
index 619f480..c988788 100644
--- a/tempest/api/compute/servers/test_server_tags.py
+++ b/tempest/api/compute/servers/test_server_tags.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
-
 from tempest.api.compute import base
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
@@ -84,11 +82,11 @@
         new_tags = [data_utils.rand_name('tag'), data_utils.rand_name('tag')]
         replaced_tags = self.client.update_all_tags(
             self.server['id'], new_tags)['tags']
-        six.assertCountEqual(self, new_tags, replaced_tags)
+        self.assertCountEqual(new_tags, replaced_tags)
 
         # List the tags and check that the tags were replaced.
         fetched_tags = self.client.list_tags(self.server['id'])['tags']
-        six.assertCountEqual(self, new_tags, fetched_tags)
+        self.assertCountEqual(new_tags, fetched_tags)
 
     @decorators.idempotent_id('a63b2a74-e918-4b7c-bcab-10c855f3a57e')
     def test_delete_all_tags(self):
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 3a4bd6d..1c839eb 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -25,6 +25,7 @@
 
 
 class ServersTestJSON(base.BaseV2ComputeTest):
+    """Test servers API"""
     create_default_network = True
 
     @classmethod
@@ -37,8 +38,11 @@
                           enable_instance_password,
                           'Instance password not available.')
     def test_create_server_with_admin_password(self):
-        # If an admin password is provided on server creation, the server's
-        # root password should be set to that password.
+        """Test creating server with admin password
+
+        If an admin password is provided on server creation, the server's
+        root password should be set to that password.
+        """
         server = self.create_test_server(adminPass='testpassword')
         self.addCleanup(self.delete_server, server['id'])
 
@@ -47,8 +51,7 @@
 
     @decorators.idempotent_id('8fea6be7-065e-47cf-89b8-496e6f96c699')
     def test_create_with_existing_server_name(self):
-        # Creating a server with a name that already exists is allowed
-
+        """Test creating a server with an already existing name is allowed"""
         # TODO(sdague): clear out try, we do cleanup one layer up
         server_name = data_utils.rand_name(
             self.__class__.__name__ + '-server')
@@ -69,8 +72,7 @@
 
     @decorators.idempotent_id('f9e15296-d7f9-4e62-b53f-a04e89160833')
     def test_create_specify_keypair(self):
-        # Specify a keypair while creating a server
-
+        """Test creating server with keypair"""
         key_name = data_utils.rand_name('key')
         self.keypairs_client.create_keypair(name=key_name)
         self.addCleanup(self.keypairs_client.delete_keypair, key_name)
@@ -97,11 +99,11 @@
 
     @decorators.idempotent_id('5e6ccff8-349d-4852-a8b3-055df7988dd2')
     def test_update_server_name(self):
-        # The server name should be changed to the provided value
+        """Test updating server name to the provided value"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.delete_server, server['id'])
         # Update instance name with non-ASCII characters
-        prefix_name = u'\u00CD\u00F1st\u00E1\u00F1c\u00E9'
+        prefix_name = '\u00CD\u00F1st\u00E1\u00F1c\u00E9'
         self._update_server_name(server['id'], 'ACTIVE', prefix_name)
 
         # stop server and check server name update again
@@ -115,7 +117,7 @@
 
     @decorators.idempotent_id('89b90870-bc13-4b73-96af-f9d4f2b70077')
     def test_update_access_server_address(self):
-        # The server's access addresses should reflect the provided values
+        """Test updating server's access addresses to the provided values"""
         server = self.create_test_server(wait_until='ACTIVE')
         self.addCleanup(self.delete_server, server['id'])
 
@@ -132,7 +134,7 @@
 
     @decorators.idempotent_id('38fb1d02-c3c5-41de-91d3-9bc2025a75eb')
     def test_create_server_with_ipv6_addr_only(self):
-        # Create a server without an IPv4 address(only IPv6 address).
+        """Test creating server with ipv6 address only (no ipv4 address)"""
         server = self.create_test_server(accessIPv6='2001:2001::3',
                                          wait_until='ACTIVE')
         self.addCleanup(self.delete_server, server['id'])
@@ -142,17 +144,22 @@
     @decorators.related_bug('1730756')
     @decorators.idempotent_id('defbaca5-d611-49f5-ae21-56ee25d2db49')
     def test_create_server_specify_multibyte_character_name(self):
-        # prefix character is:
-        # http://unicode.org/cldr/utility/character.jsp?a=20A1
+        """Test creating server with multibyte character name
 
-        # We use a string with 3 byte utf-8 character due to nova
-        # will return 400(Bad Request) if we attempt to send a name which has
-        # 4 byte utf-8 character.
+        The prefix character is:
+        http://unicode.org/cldr/utility/character.jsp?a=20A1
+
+        We use a string with a 3-byte utf-8 character because nova
+        will return 400 (Bad Request) if we attempt to send a name which
+        has a 4-byte utf-8 character.
+        """
         utf8_name = data_utils.rand_name(b'\xe2\x82\xa1'.decode('utf-8'))
         self.create_test_server(name=utf8_name, wait_until='ACTIVE')
 
 
 class ServerShowV247Test(base.BaseV2ComputeTest):
+    """Test servers API with compute microversion greater than 2.46"""
+
     min_microversion = '2.47'
     max_microversion = 'latest'
 
@@ -164,12 +171,14 @@
 
     @decorators.idempotent_id('88b0bdb2-494c-11e7-a919-92ebcb67fe33')
     def test_show_server(self):
+        """Test getting server detail"""
         server = self.create_test_server()
         # All fields will be checked by API schema
         self.servers_client.show_server(server['id'])
 
     @decorators.idempotent_id('8de397c2-57d0-4b90-aa30-e5d668f21a8b')
     def test_update_rebuild_list_server(self):
+        """Test update/rebuild/list server"""
         server = self.create_test_server()
         # Checking update API response schema
         self.servers_client.update_server(server['id'])
@@ -184,6 +193,8 @@
 
 
 class ServerShowV263Test(base.BaseV2ComputeTest):
+    """Test servers API with compute microversion greater than 2.62"""
+
     min_microversion = '2.63'
     max_microversion = 'latest'
 
@@ -195,6 +206,7 @@
                           'required to test image certificate validation.')
     @decorators.idempotent_id('71b8e3d5-11d2-494f-b917-b094a4afed3c')
     def test_show_update_rebuild_list_server(self):
+        """Test show/update/rebuild/list server"""
         trusted_certs = CONF.compute.certified_image_trusted_certs
         server = self.create_test_server(
             image_id=CONF.compute.certified_image_ref,
diff --git a/tempest/api/compute/servers/test_servers_microversions.py b/tempest/api/compute/servers/test_servers_microversions.py
index 2434884..566d04a 100644
--- a/tempest/api/compute/servers/test_servers_microversions.py
+++ b/tempest/api/compute/servers/test_servers_microversions.py
@@ -32,11 +32,13 @@
 
 
 class ServerShowV254Test(base.BaseV2ComputeTest):
+    """Test servers API schema for compute microversion greater than 2.53"""
     min_microversion = '2.54'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('09170a98-4940-4637-add7-1a35121f1a5a')
     def test_rebuild_server(self):
+        """Test rebuilding server with microversion greater than 2.53"""
         server = self.create_test_server(wait_until='ACTIVE')
         keypair_name = data_utils.rand_name(
             self.__class__.__name__ + '-keypair')
@@ -52,11 +54,13 @@
 
 
 class ServerShowV257Test(base.BaseV2ComputeTest):
+    """Test servers API schema for compute microversion greater than 2.56"""
     min_microversion = '2.57'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('803df848-080a-4261-8f11-b020cd9b6f60')
     def test_rebuild_server(self):
+        """Test rebuilding server with microversion greater than 2.56"""
         server = self.create_test_server(wait_until='ACTIVE')
         user_data = "ZWNobyAiaGVsbG8gd29ybGQi"
         # Checking rebuild API response schema
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 6676358..4f85048 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -60,7 +60,8 @@
         server = cls.create_test_server(wait_until='ACTIVE')
         cls.server_id = server['id']
 
-        server = cls.create_test_server()
+        # Wait for the server to be active so the delete cannot race the build
+        server = cls.create_test_server(wait_until='ACTIVE')
         cls.client.delete_server(server['id'])
         waiters.wait_for_server_termination(cls.client, server['id'])
         cls.deleted_server_id = server['id']
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index dfd6ca4..b2e02c5 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -28,6 +28,8 @@
 # TODO(mriedem): Remove this test class once the nova queens branch goes into
 # extended maintenance mode.
 class VirtualInterfacesTestJSON(base.BaseV2ComputeTest):
+    """Test virtual interfaces API with compute microversion less than 2.44"""
+
     max_microversion = '2.43'
 
     depends_on_nova_network = True
@@ -47,9 +49,7 @@
     @decorators.idempotent_id('96c4e2ef-5e4d-4d7f-87f5-fed6dca18016')
     @utils.services('network')
     def test_list_virtual_interfaces(self):
-        # Positive test:Should be able to GET the virtual interfaces list
-        # for a given server_id
-
+        """Test listing virtual interfaces of a server"""
         if CONF.service_available.neutron:
             with testtools.ExpectedException(exceptions.BadRequest):
                 self.client.list_virtual_interfaces(self.server['id'])
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index f6e8bc9..5667281 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -23,6 +23,12 @@
 # TODO(mriedem): Remove this test class once the nova queens branch goes into
 # extended maintenance mode.
 class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
+    """Negative tests of virtual interfaces API
+
+    Negative tests of virtual interfaces API for compute microversion less
+    than 2.44.
+    """
+
     max_microversion = '2.43'
 
     depends_on_nova_network = True
@@ -37,8 +43,7 @@
     @decorators.idempotent_id('64ebd03c-1089-4306-93fa-60f5eb5c803c')
     @utils.services('network')
     def test_list_virtual_interfaces_invalid_server_id(self):
-        # Negative test: Should not be able to GET virtual interfaces
-        # for an invalid server_id
+        """Test listing virtual interfaces of an invalid server should fail"""
         invalid_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.servers_client.list_virtual_interfaces,
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index a62492d..5fe0e3b 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -20,6 +20,7 @@
 
 
 class QuotasTestJSON(base.BaseV2ComputeTest):
+    """Test compute quotas"""
 
     @classmethod
     def skip_checks(cls):
@@ -59,7 +60,7 @@
 
     @decorators.idempotent_id('f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107')
     def test_get_quotas(self):
-        # User can get the quota set for it's tenant
+        """Test user can get the compute quota set for its project"""
         expected_quota_set = self.default_quota_set | set(['id'])
         quota_set = self.client.show_quota_set(self.tenant_id)['quota_set']
         self.assertEqual(quota_set['id'], self.tenant_id)
@@ -75,7 +76,7 @@
 
     @decorators.idempotent_id('9bfecac7-b966-4f47-913f-1a9e2c12134a')
     def test_get_default_quotas(self):
-        # User can get the default quota set for it's tenant
+        """Test user can get the default compute quota set for its project"""
         expected_quota_set = self.default_quota_set | set(['id'])
         quota_set = (self.client.show_default_quota_set(self.tenant_id)
                      ['quota_set'])
@@ -85,7 +86,7 @@
 
     @decorators.idempotent_id('cd65d997-f7e4-4966-a7e9-d5001b674fdc')
     def test_compare_tenant_quotas_with_default_quotas(self):
-        # Tenants are created with the default quota values
+        """Test tenants are created with the default compute quota values"""
         default_quota_set = \
             self.client.show_default_quota_set(self.tenant_id)['quota_set']
         tenant_quota_set = (self.client.show_quota_set(self.tenant_id)
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index d85e4f7..7251e36 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -200,6 +200,10 @@
         super(AttachVolumeShelveTestJSON, cls).skip_checks()
         if not CONF.compute_feature_enabled.shelve:
             raise cls.skipException('Shelve is not available.')
+        if CONF.compute.compute_volume_common_az:
+            # assuming cross_az_attach is set to false in nova.conf
+            # per the compute_volume_common_az option description
+            raise cls.skipException('Cross AZ attach not available.')
 
     def _count_volumes(self, server, validation_resources):
         # Count number of volumes on an instance
diff --git a/tempest/api/compute/volumes/test_volume_snapshots.py b/tempest/api/compute/volumes/test_volume_snapshots.py
index f3ccf8d..30bea60 100644
--- a/tempest/api/compute/volumes/test_volume_snapshots.py
+++ b/tempest/api/compute/volumes/test_volume_snapshots.py
@@ -24,6 +24,7 @@
 
 
 class VolumesSnapshotsTestJSON(base.BaseV2ComputeTest):
+    """Test volume snapshots with compute microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -48,6 +49,7 @@
 
     @decorators.idempotent_id('cd4ec87d-7825-450d-8040-6e2068f2da8f')
     def test_volume_snapshot_create_get_list_delete(self):
+        """Test create/get/list/delete volume snapshot"""
         volume = self.create_volume()
         self.addCleanup(self.delete_volume, volume['id'])
 
diff --git a/tempest/api/compute/volumes/test_volumes_get.py b/tempest/api/compute/volumes/test_volumes_get.py
index 0d23c1f..554f418 100644
--- a/tempest/api/compute/volumes/test_volumes_get.py
+++ b/tempest/api/compute/volumes/test_volumes_get.py
@@ -25,6 +25,7 @@
 
 
 class VolumesGetTestJSON(base.BaseV2ComputeTest):
+    """Test compute volumes API with microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -45,7 +46,7 @@
 
     @decorators.idempotent_id('f10f25eb-9775-4d9d-9cbe-1cf54dae9d5f')
     def test_volume_create_get_delete(self):
-        # CREATE, GET, DELETE Volume
+        """Test create/get/delete volume"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         # Create volume
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index 28bc174..0b37264 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -21,6 +21,8 @@
 
 
 class VolumesTestJSON(base.BaseV2ComputeTest):
+    """Test listing volumes with compute microversion less than 2.36"""
+
     # NOTE: This test creates a number of 1G volumes. To run successfully,
     # ensure that the backing file for the volume group that Nova uses
     # has space for at least 3 1G volumes!
@@ -57,7 +59,7 @@
 
     @decorators.idempotent_id('bc2dd1a0-15af-48e5-9990-f2e75a48325d')
     def test_volume_list(self):
-        # Should return the list of Volumes
+        """Test listing volumes should return all volumes"""
         # Fetch all Volumes
         fetched_list = self.client.list_volumes()['volumes']
         # Now check if all the Volumes created in setup are in fetched list
@@ -72,7 +74,7 @@
 
     @decorators.idempotent_id('bad0567a-5a4f-420b-851e-780b55bb867c')
     def test_volume_list_with_details(self):
-        # Should return the list of Volumes with details
+        """Test listing volumes with detail should return all volumes"""
         # Fetch all Volumes
         fetched_list = self.client.list_volumes(detail=True)['volumes']
         # Now check if all the Volumes created in setup are in fetched list
@@ -87,7 +89,11 @@
 
     @decorators.idempotent_id('1048ed81-2baf-487a-b284-c0622b86e7b8')
     def test_volume_list_param_limit(self):
-        # Return the list of volumes based on limit set
+        """Test listing volumes based on limit set
+
+        If we list volumes with limit=2, then only 2 volumes should be
+        returned.
+        """
         params = {'limit': 2}
         fetched_vol_list = self.client.list_volumes(**params)['volumes']
 
@@ -96,7 +102,11 @@
 
     @decorators.idempotent_id('33985568-4965-49d5-9bcc-0aa007ca5b7a')
     def test_volume_list_with_detail_param_limit(self):
-        # Return the list of volumes with details based on limit set.
+        """Test listing volumes with detail based on limit set
+
+        If we list volumes with detail with limit=2, then only 2 volumes with
+        detail should be returned.
+        """
         params = {'limit': 2}
         fetched_vol_list = self.client.list_volumes(detail=True,
                                                     **params)['volumes']
@@ -106,7 +116,12 @@
 
     @decorators.idempotent_id('51c22651-a074-4ea7-af0b-094f9331303e')
     def test_volume_list_param_offset_and_limit(self):
-        # Return the list of volumes based on offset and limit set.
+        """Test listing volumes based on offset and limit set
+
+        If we list volumes with offset=1 and limit=1, then the 1 volume
+        at position 1 in the list of all volumes should be returned.
+        (Positions in the list of all volumes start from 0.)
+        """
         # get all volumes list
         all_vol_list = self.client.list_volumes()['volumes']
         params = {'offset': 1, 'limit': 1}
@@ -123,7 +138,13 @@
 
     @decorators.idempotent_id('06b6abc4-3f10-48e9-a7a1-3facc98f03e5')
     def test_volume_list_with_detail_param_offset_and_limit(self):
-        # Return the list of volumes details based on offset and limit set.
+        """Test listing volumes with detail based on offset and limit set
+
+        If we list volumes with detail with offset=1 and limit=1, then the
+        1 volume with detail at position 1 in the list of all volumes
+        should be returned.
+        (Positions in the list of all volumes start from 0.)
+        """
         # get all volumes list
         all_vol_list = self.client.list_volumes(detail=True)['volumes']
         params = {'offset': 1, 'limit': 1}
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index 444ce93..f553e32 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -23,6 +23,7 @@
 
 
 class VolumesNegativeTest(base.BaseV2ComputeTest):
+    """Negative tests of volumes with compute microversion less than 2.36"""
 
     # These tests will fail with a 404 starting from microversion 2.36. For
     # more information, see:
@@ -44,7 +45,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c03ea686-905b-41a2-8748-9635154b7c57')
     def test_volume_get_nonexistent_volume_id(self):
-        # Negative: Should not be able to get details of nonexistent volume
+        """Test getting details of a non existent volume should fail"""
         # Creating a nonexistent volume id
         # Trying to GET a non existent volume
         self.assertRaises(lib_exc.NotFound, self.client.show_volume,
@@ -53,7 +54,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('54a34226-d910-4b00-9ef8-8683e6c55846')
     def test_volume_delete_nonexistent_volume_id(self):
-        # Negative: Should not be able to delete nonexistent Volume
+        """Test deleting a nonexistent volume should fail"""
         # Creating nonexistent volume id
         # Trying to DELETE a non existent volume
         self.assertRaises(lib_exc.NotFound, self.client.delete_volume,
@@ -62,8 +63,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('5125ae14-152b-40a7-b3c5-eae15e9022ef')
     def test_create_volume_with_invalid_size(self):
-        # Negative: Should not be able to create volume with invalid size
-        # in request
+        """Test creating volume with invalid size should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -72,8 +72,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('131cb3a1-75cc-4d40-b4c3-1317f64719b0')
     def test_create_volume_without_passing_size(self):
-        # Negative: Should not be able to create volume without passing size
-        # in request
+        """Test creating volume without specifying size should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -82,7 +81,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8cce995e-0a83-479a-b94d-e1e40b8a09d1')
     def test_create_volume_with_size_zero(self):
-        # Negative: Should not be able to create volume with size zero
+        """Test creating volume with size=0 should fail"""
         v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
         metadata = {'Type': 'work'}
         self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
@@ -91,14 +90,13 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('62bab09a-4c03-4617-8cca-8572bc94af9b')
     def test_get_volume_without_passing_volume_id(self):
-        # Negative: Should not be able to get volume when empty ID is passed
+        """Test getting volume details without volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.show_volume, '')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('62972737-124b-4513-b6cf-2f019f178494')
     def test_delete_invalid_volume_id(self):
-        # Negative: Should not be able to delete volume when invalid ID is
-        # passed
+        """Test deleting volume with an invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_volume,
                           data_utils.rand_name('invalid'))
@@ -106,5 +104,5 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0d1417c5-4ae8-4c2c-adc5-5f0b864253e5')
     def test_delete_volume_without_passing_volume_id(self):
-        # Negative: Should not be able to delete volume when empty ID is passed
+        """Test deleting volume without volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.client.delete_volume, '')
diff --git a/tempest/api/identity/admin/v3/test_application_credentials.py b/tempest/api/identity/admin/v3/test_application_credentials.py
index c9cafd8..f5b0356 100644
--- a/tempest/api/identity/admin/v3/test_application_credentials.py
+++ b/tempest/api/identity/admin/v3/test_application_credentials.py
@@ -37,7 +37,7 @@
         secret = app_cred['secret']
 
         # Check that the application credential is functional
-        token_id, resp = self.non_admin_token.get_token(
+        _, resp = self.non_admin_token.get_token(
             app_cred_id=app_cred['id'],
             app_cred_secret=secret,
             auth_data=True
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index dd7d5af..e5137f4 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -142,6 +142,26 @@
         self.roles_client.delete_role_from_user_on_domain(
             self.domain['id'], self.user_body['id'], self.role['id'])
 
+    @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+                      'Skipped because environment has an immutable user '
+                      'source and solely provides read-only access to users.')
+    @decorators.idempotent_id('e5a81737-d294-424d-8189-8664858aae4c')
+    def test_grant_list_revoke_role_to_user_on_system(self):
+        self.roles_client.create_user_role_on_system(
+            self.user_body['id'], self.role['id'])
+
+        roles = self.roles_client.list_user_roles_on_system(
+            self.user_body['id'])['roles']
+
+        self.assertEqual(1, len(roles))
+        self.assertEqual(self.role['id'], roles[0]['id'])
+
+        self.roles_client.check_user_role_existence_on_system(
+            self.user_body['id'], self.role['id'])
+
+        self.roles_client.delete_role_from_user_on_system(
+            self.user_body['id'], self.role['id'])
+
     @decorators.idempotent_id('cbf11737-1904-4690-9613-97bcbb3df1c4')
     @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
                       'Skipped because environment has an immutable user '
@@ -197,6 +217,23 @@
         self.roles_client.delete_role_from_group_on_domain(
             self.domain['id'], self.group_body['id'], self.role['id'])
 
+    @decorators.idempotent_id('c888fe4f-8018-48db-b959-542225c1b4b6')
+    def test_grant_list_revoke_role_to_group_on_system(self):
+        self.roles_client.create_group_role_on_system(
+            self.group_body['id'], self.role['id'])
+
+        roles = self.roles_client.list_group_roles_on_system(
+            self.group_body['id'])['roles']
+
+        self.assertEqual(1, len(roles))
+        self.assertEqual(self.role['id'], roles[0]['id'])
+
+        self.roles_client.check_role_from_group_on_system_existence(
+            self.group_body['id'], self.role['id'])
+
+        self.roles_client.delete_role_from_group_on_system(
+            self.group_body['id'], self.role['id'])
+
     @decorators.idempotent_id('f5654bcc-08c4-4f71-88fe-05d64e06de94')
     def test_list_roles(self):
         """Test listing roles"""
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index f3a7471..5bbd65c 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
-
 from tempest.api.identity import base
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -70,8 +68,8 @@
         orig_expires_at = token_auth['token']['expires_at']
         orig_user = token_auth['token']['user']
 
-        self.assertIsInstance(token_auth['token']['expires_at'], six.text_type)
-        self.assertIsInstance(token_auth['token']['issued_at'], six.text_type)
+        self.assertIsInstance(token_auth['token']['expires_at'], str)
+        self.assertIsInstance(token_auth['token']['issued_at'], str)
         self.assertEqual(['password'], token_auth['token']['methods'])
         self.assertEqual(user['id'], token_auth['token']['user']['id'])
         self.assertEqual(user['name'], token_auth['token']['user']['name'])
@@ -91,7 +89,7 @@
 
         self.assertEqual(orig_expires_at, token_auth['token']['expires_at'],
                          'Expiration time should match original token')
-        self.assertIsInstance(token_auth['token']['issued_at'], six.text_type)
+        self.assertIsInstance(token_auth['token']['issued_at'], str)
         self.assertEqual(set(['password', 'token']),
                          set(token_auth['token']['methods']))
         self.assertEqual(orig_user, token_auth['token']['user'],
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 282343c..5722f0e 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -192,6 +192,7 @@
             cls.os_primary.identity_versions_v3_client
         cls.non_admin_app_creds_client = \
             cls.os_primary.application_credentials_client
+        cls.non_admin_access_rules_client = cls.os_primary.access_rules_client
 
 
 class BaseIdentityV3AdminTest(BaseIdentityV3Test):
diff --git a/tempest/api/identity/v2/test_tokens.py b/tempest/api/identity/v2/test_tokens.py
index a928ad9..d3776b8 100644
--- a/tempest/api/identity/v2/test_tokens.py
+++ b/tempest/api/identity/v2/test_tokens.py
@@ -14,7 +14,6 @@
 #    under the License.
 
 from oslo_utils import timeutils
-import six
 from tempest.api.identity import base
 from tempest.lib import decorators
 
@@ -36,7 +35,7 @@
         body = token_client.auth(username, password, tenant_name)
 
         self.assertNotEmpty(body['token']['id'])
-        self.assertIsInstance(body['token']['id'], six.string_types)
+        self.assertIsInstance(body['token']['id'], str)
 
         now = timeutils.utcnow()
         expires_at = timeutils.normalize_time(
diff --git a/tempest/api/identity/v3/test_access_rules.py b/tempest/api/identity/v3/test_access_rules.py
new file mode 100644
index 0000000..608eb59
--- /dev/null
+++ b/tempest/api/identity/v3/test_access_rules.py
@@ -0,0 +1,84 @@
+# Copyright 2019 SUSE LLC
+#
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.identity import base
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+CONF = config.CONF
+
+
+class AccessRulesV3Test(base.BaseIdentityV3Test):
+
+    @classmethod
+    def skip_checks(cls):
+        super(AccessRulesV3Test, cls).skip_checks()
+        if not CONF.identity_feature_enabled.access_rules:
+            raise cls.skipException("Application credential access rules are "
+                                    "not available in this environment")
+
+    @classmethod
+    def resource_setup(cls):
+        super(AccessRulesV3Test, cls).resource_setup()
+        cls.user_id = cls.os_primary.credentials.user_id
+        cls.project_id = cls.os_primary.credentials.project_id
+
+    def setUp(self):
+        super(AccessRulesV3Test, self).setUp()
+        ac = self.non_admin_app_creds_client
+        access_rules = [
+            {
+                "path": "/v2.1/servers/*/ips",
+                "method": "GET",
+                "service": "compute"
+            }
+        ]
+        self.app_cred = ac.create_application_credential(
+            self.user_id,
+            name=data_utils.rand_name('application_credential'),
+            access_rules=access_rules
+        )['application_credential']
+
+    @decorators.idempotent_id('2354c498-5119-4ba5-9f0d-44f16f78fb0e')
+    def test_list_access_rules(self):
+        ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+        self.assertEqual(1, len(ar['access_rules']))
+
+    @decorators.idempotent_id('795dd507-ca1e-40e9-ba90-ff0a08689ba4')
+    def test_show_access_rule(self):
+        access_rule_id = self.app_cred['access_rules'][0]['id']
+        self.non_admin_access_rules_client.show_access_rule(
+            self.user_id, access_rule_id)
+
+    @decorators.idempotent_id('278757e9-e193-4bf8-adf2-0b0a229a17d0')
+    def test_delete_access_rule(self):
+        access_rule_id = self.app_cred['access_rules'][0]['id']
+        app_cred_id = self.app_cred['id']
+        self.assertRaises(
+            lib_exc.Forbidden,
+            self.non_admin_access_rules_client.delete_access_rule,
+            self.user_id,
+            access_rule_id)
+        self.non_admin_app_creds_client.delete_application_credential(
+            self.user_id, app_cred_id)
+        ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+        self.assertEqual(1, len(ar['access_rules']))
+        self.non_admin_access_rules_client.delete_access_rule(
+            self.user_id, access_rule_id)
+        ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+        self.assertEqual(0, len(ar['access_rules']))
diff --git a/tempest/api/identity/v3/test_application_credentials.py b/tempest/api/identity/v3/test_application_credentials.py
index ef1bbdf..06734aa 100644
--- a/tempest/api/identity/v3/test_application_credentials.py
+++ b/tempest/api/identity/v3/test_application_credentials.py
@@ -19,8 +19,11 @@
 from oslo_utils import timeutils
 
 from tempest.api.identity import base
+from tempest import config
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class ApplicationCredentialsV3Test(base.BaseApplicationCredentialsV3Test):
     """Test application credentials"""
@@ -48,7 +51,7 @@
         self.assertNotIn('secret', app_cred)
 
         # Check that the application credential is functional
-        token_id, resp = self.non_admin_token.get_token(
+        _, resp = self.non_admin_token.get_token(
             app_cred_id=app_cred['id'],
             app_cred_secret=secret,
             auth_data=True
@@ -65,6 +68,24 @@
         expires_str = expires_at.isoformat()
         self.assertEqual(expires_str, app_cred['expires_at'])
 
+    @decorators.idempotent_id('529936eb-aa5d-463d-9f79-01c113d3b88f')
+    def test_create_application_credential_access_rules(self):
+        if not CONF.identity_feature_enabled.access_rules:
+            raise self.skipException("Application credential access rules are "
+                                     "not available in this environment")
+        access_rules = [
+            {
+                "path": "/v2.1/servers/*/ips",
+                "method": "GET",
+                "service": "compute"
+            }
+        ]
+        app_cred = self.create_application_credential(
+            access_rules=access_rules)
+        access_rule_resp = app_cred['access_rules'][0]
+        access_rule_resp.pop('id')
+        self.assertDictEqual(access_rules[0], access_rule_resp)
+
     @decorators.idempotent_id('ff0cd457-6224-46e7-b79e-0ada4964a8a6')
     def test_list_application_credentials(self):
         """Test listing application credentials"""
diff --git a/tempest/api/identity/v3/test_ec2_credentials.py b/tempest/api/identity/v3/test_ec2_credentials.py
new file mode 100644
index 0000000..a2cbc4a
--- /dev/null
+++ b/tempest/api/identity/v3/test_ec2_credentials.py
@@ -0,0 +1,113 @@
+# Copyright 2020 SUSE LLC
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.identity import base
+from tempest.common import utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+class EC2CredentialsTest(base.BaseIdentityV3Test):
+
+    @classmethod
+    def skip_checks(cls):
+        super(EC2CredentialsTest, cls).skip_checks()
+        if not utils.is_extension_enabled('OS-EC2', 'identity'):
+            msg = "OS-EC2 identity extension not enabled."
+            raise cls.skipException(msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(EC2CredentialsTest, cls).resource_setup()
+        cls.creds = cls.os_primary.credentials
+
+    @decorators.idempotent_id('b0f55a29-54e5-4166-999d-712347e0c920')
+    def test_create_ec2_credential(self):
+        """Create user ec2 credential."""
+        resp = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        access = resp['access']
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, access)
+        self.assertNotEmpty(resp['access'])
+        self.assertNotEmpty(resp['secret'])
+        self.assertEqual(self.creds.user_id, resp['user_id'])
+        self.assertEqual(self.creds.tenant_id, resp['tenant_id'])
+
+    @decorators.idempotent_id('897813f0-160c-4fdc-aabc-24ee635ce4a9')
+    def test_list_ec2_credentials(self):
+        """Get the list of user ec2 credentials."""
+        created_creds = []
+        # create first ec2 credentials
+        creds1 = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        created_creds.append(creds1['access'])
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, creds1['access'])
+
+        # create second ec2 credentials
+        creds2 = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        created_creds.append(creds2['access'])
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, creds2['access'])
+
+        # get the list of user ec2 credentials
+        resp = self.non_admin_users_client.list_user_ec2_credentials(
+            self.creds.user_id)["credentials"]
+        fetched_creds = [cred['access'] for cred in resp]
+        # created credentials should be in the fetched list
+        missing = [cred for cred in created_creds
+                   if cred not in fetched_creds]
+        self.assertEmpty(missing,
+                         "Failed to find ec2_credentials %s in fetched list" %
+                         ', '.join(cred for cred in missing))
+
+    @decorators.idempotent_id('8b8d1010-5958-48df-a6cd-5e3df72e6bcf')
+    def test_show_ec2_credential(self):
+        """Get a specific user ec2 credential."""
+        resp = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        self.addCleanup(
+            self.non_admin_users_client.delete_user_ec2_credential,
+            self.creds.user_id, resp['access'])
+
+        ec2_creds = self.non_admin_users_client.show_user_ec2_credential(
+            self.creds.user_id, resp['access']
+        )["credential"]
+        for key in ['access', 'secret', 'user_id', 'tenant_id']:
+            self.assertEqual(ec2_creds[key], resp[key])
+
+    @decorators.idempotent_id('9408d61b-8be0-4a8d-9b85-14f61edb456b')
+    def test_delete_ec2_credential(self):
+        """Delete user ec2 credential."""
+        resp = self.non_admin_users_client.create_user_ec2_credential(
+            self.creds.user_id,
+            tenant_id=self.creds.tenant_id)["credential"]
+        access = resp['access']
+        self.non_admin_users_client.delete_user_ec2_credential(
+            self.creds.user_id, access)
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.non_admin_users_client.show_user_ec2_credential,
+            self.creds.user_id,
+            access)
diff --git a/tempest/api/identity/v3/test_tokens.py b/tempest/api/identity/v3/test_tokens.py
index b201285..55fcead 100644
--- a/tempest/api/identity/v3/test_tokens.py
+++ b/tempest/api/identity/v3/test_tokens.py
@@ -16,7 +16,6 @@
 import operator
 
 from oslo_utils import timeutils
-import six
 
 from tempest.api.identity import base
 from tempest.lib import decorators
@@ -88,7 +87,7 @@
             auth_data=True)
 
         self.assertNotEmpty(token_id)
-        self.assertIsInstance(token_id, six.string_types)
+        self.assertIsInstance(token_id, str)
 
         now = timeutils.utcnow()
         expires_at = timeutils.normalize_time(
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index ae7b3e4..23e7fd8 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -12,12 +12,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.common import image as common_image
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions
 import tempest.test
 
 CONF = config.CONF
@@ -112,7 +113,7 @@
         cls.alt_tenant_id = cls.alt_image_member_client.tenant_id
 
     def _create_image(self):
-        image_file = six.BytesIO(data_utils.random_bytes())
+        image_file = io.BytesIO(data_utils.random_bytes())
         image = self.create_image(container_format='bare',
                                   disk_format='raw',
                                   is_public=False,
@@ -133,12 +134,6 @@
     def setup_clients(cls):
         super(BaseV2ImageTest, cls).setup_clients()
         cls.client = cls.os_primary.image_client_v2
-        cls.namespaces_client = cls.os_primary.namespaces_client
-        cls.resource_types_client = cls.os_primary.resource_types_client
-        cls.namespace_properties_client =\
-            cls.os_primary.namespace_properties_client
-        cls.namespace_objects_client = cls.os_primary.namespace_objects_client
-        cls.namespace_tags_client = cls.os_primary.namespace_tags_client
         cls.schemas_client = cls.os_primary.schemas_client
         cls.versions_client = cls.os_primary.image_versions_client
 
@@ -155,6 +150,15 @@
                         namespace_name)
         return namespace
 
+    @classmethod
+    def get_available_stores(cls):
+        stores = []
+        try:
+            stores = cls.client.info_stores()['stores']
+        except exceptions.NotFound:
+            pass
+        return stores
+
 
 class BaseV2MemberImageTest(BaseV2ImageTest):
 
@@ -194,3 +198,9 @@
     def setup_clients(cls):
         super(BaseV2ImageAdminTest, cls).setup_clients()
         cls.admin_client = cls.os_admin.image_client_v2
+        cls.namespaces_client = cls.os_admin.namespaces_client
+        cls.resource_types_client = cls.os_admin.resource_types_client
+        cls.namespace_properties_client =\
+            cls.os_admin.namespace_properties_client
+        cls.namespace_objects_client = cls.os_admin.namespace_objects_client
+        cls.namespace_tags_client = cls.os_admin.namespace_tags_client
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 595717e..6fd6c4e 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.api.image import base
 from tempest.common import image as common_image
@@ -72,7 +72,7 @@
             self.assertEqual(val, image.get('properties')[key])
 
         # Now try uploading an image file
-        image_file = six.BytesIO(data_utils.random_bytes())
+        image_file = io.BytesIO(data_utils.random_bytes())
         body = self.client.update_image(image['id'], data=image_file)['image']
         self.assertIn('size', body)
         self.assertEqual(1024, body.get('size'))
@@ -204,7 +204,7 @@
         Note that the size of the new image is a random number between
         1024 and 4096
         """
-        image_file = six.BytesIO(data_utils.random_bytes(size))
+        image_file = io.BytesIO(data_utils.random_bytes(size))
         name = 'New Standard Image %s' % name
         image = cls.create_image(name=name,
                                  container_format=container_format,
@@ -306,7 +306,7 @@
                                disk_format, size):
         """Create a new standard image and return newly-registered image-id"""
 
-        image_file = six.BytesIO(data_utils.random_bytes(size))
+        image_file = io.BytesIO(data_utils.random_bytes(size))
         name = 'New Standard Image %s' % name
         image = cls.create_image(name=name,
                                  container_format=container_format,
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index 7e13d7f..733c778 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -13,10 +13,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import io
+
 from tempest.api.image import base
+from tempest.common import waiters
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
     """"Test image operations about image owner"""
@@ -52,3 +58,65 @@
         self.assertEqual(random_id_2, updated_image_info['owner'])
         self.assertNotEqual(created_image_info['owner'],
                             updated_image_info['owner'])
+
+
+class ImportCopyImagesTest(base.BaseV2ImageAdminTest):
+    """Test the import copy-image operations"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(ImportCopyImagesTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @decorators.idempotent_id('9b3b644e-03d1-11eb-a036-fa163e2eaf49')
+    def test_image_copy_image_import(self):
+        """Test 'copy-image' import functionality
+
+        Create image, import image with copy-image method and
+        verify that import succeeded.
+        """
+        available_stores = self.get_available_stores()
+        available_import_methods = self.client.info_import()[
+            'import-methods']['value']
+        # NOTE(gmann): Skip if copy-image import method and multistore
+        # are not available.
+        if ('copy-image' not in available_import_methods or
+            not available_stores):
+            raise self.skipException('Either copy-image import method or '
+                                     'multistore is not available')
+        uuid = data_utils.rand_uuid()
+        image_name = data_utils.rand_name('copy-image')
+        container_format = CONF.image.container_formats[0]
+        disk_format = CONF.image.disk_formats[0]
+        image = self.create_image(name=image_name,
+                                  container_format=container_format,
+                                  disk_format=disk_format,
+                                  visibility='private',
+                                  ramdisk_id=uuid)
+        self.assertEqual('queued', image['status'])
+
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.store_image_file(image['id'], image_file)
+
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual(len(file_content), body.get('size'))
+        self.assertEqual('active', body['status'])
+
+        # Copy the image to all the stores. For an all_stores request,
+        # glance will skip the stores where the image is already available.
+        self.admin_client.image_import(image['id'], method='copy-image',
+                                       all_stores=True,
+                                       all_stores_must_succeed=False)
+
+        # Wait for the copy to finish on all stores.
+        failed_stores = waiters.wait_for_image_copied_to_stores(
+            self.client, image['id'])
+        # Assert that the copy did not fail on any store.
+        self.assertEqual(0, len(failed_stores),
+                         "Image copy failed on the following stores: %s" %
+                         str(failed_stores))
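The ImportCopyImagesTest added above stages image data into the default store and then asks Glance to replicate it everywhere with the 'copy-image' import method, relying on waiters.wait_for_image_copied_to_stores to block until every store has finished. As a rough, non-authoritative sketch of what such a waiter does (the real one lives in tempest/common/waiters.py; the os_glance_importing_to_stores and os_glance_failed_import property names are assumptions based on Glance's import progress reporting)::

    import time

    def wait_for_copy(client, image_id, timeout=300, interval=5):
        """Poll an image until no store copy is still in progress.

        Returns the stores the copy failed on (empty list on success).
        """
        start = time.time()
        while time.time() - start < timeout:
            body = client.show_image(image_id)
            importing = body.get('os_glance_importing_to_stores') or ''
            failed = body.get('os_glance_failed_import') or ''
            if not importing:
                return [s for s in failed.split(',') if s]
            time.sleep(interval)
        raise RuntimeError('Timed out waiting for image %s copy' % image_id)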
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_objects.py b/tempest/api/image/v2/admin/test_images_metadefs_namespace_objects.py
similarity index 98%
rename from tempest/api/image/v2/test_images_metadefs_namespace_objects.py
rename to tempest/api/image/v2/admin/test_images_metadefs_namespace_objects.py
index 32b81b1..9222920 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_objects.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespace_objects.py
@@ -16,7 +16,7 @@
 from tempest.lib import decorators
 
 
-class MetadataNamespaceObjectsTest(base.BaseV2ImageTest):
+class MetadataNamespaceObjectsTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition namespace objects basic functionality"""
 
     def _create_namespace_object(self, namespace):
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_properties.py b/tempest/api/image/v2/admin/test_images_metadefs_namespace_properties.py
similarity index 97%
rename from tempest/api/image/v2/test_images_metadefs_namespace_properties.py
rename to tempest/api/image/v2/admin/test_images_metadefs_namespace_properties.py
index 1d4f0a6..10dfba1 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_properties.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespace_properties.py
@@ -15,7 +15,7 @@
 from tempest.lib import decorators
 
 
-class MetadataNamespacePropertiesTest(base.BaseV2ImageTest):
+class MetadataNamespacePropertiesTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition namespace property basic functionality"""
 
     @decorators.idempotent_id('b1a3765e-3a5d-4f6d-a3a7-3ca3476ae768')
diff --git a/tempest/api/image/v2/test_images_metadefs_namespace_tags.py b/tempest/api/image/v2/admin/test_images_metadefs_namespace_tags.py
similarity index 98%
rename from tempest/api/image/v2/test_images_metadefs_namespace_tags.py
rename to tempest/api/image/v2/admin/test_images_metadefs_namespace_tags.py
index dc64185..9e88e03 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespace_tags.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespace_tags.py
@@ -16,7 +16,7 @@
 from tempest.lib import decorators
 
 
-class MetadataNamespaceTagsTest(base.BaseV2ImageTest):
+class MetadataNamespaceTagsTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition namespace tags basic functionality"""
 
     tags = [
diff --git a/tempest/api/image/v2/test_images_metadefs_namespaces.py b/tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
similarity index 98%
rename from tempest/api/image/v2/test_images_metadefs_namespaces.py
rename to tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
index 502949f..0fe49f9 100644
--- a/tempest/api/image/v2/test_images_metadefs_namespaces.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
@@ -20,7 +20,7 @@
 from tempest.lib import exceptions as lib_exc
 
 
-class MetadataNamespacesTest(base.BaseV2ImageTest):
+class MetadataNamespacesTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition Namespaces basic functionality"""
 
     @decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
diff --git a/tempest/api/image/v2/test_images_metadefs_resource_types.py b/tempest/api/image/v2/admin/test_images_metadefs_resource_types.py
similarity index 97%
rename from tempest/api/image/v2/test_images_metadefs_resource_types.py
rename to tempest/api/image/v2/admin/test_images_metadefs_resource_types.py
index 6867f2d..e533c43 100644
--- a/tempest/api/image/v2/test_images_metadefs_resource_types.py
+++ b/tempest/api/image/v2/admin/test_images_metadefs_resource_types.py
@@ -17,7 +17,7 @@
 from tempest.lib import decorators
 
 
-class MetadataResourceTypesTest(base.BaseV2ImageTest):
+class MetadataResourceTypesTest(base.BaseV2ImageAdminTest):
     """Test the Metadata definition resource types basic functionality"""
 
     @decorators.idempotent_id('6f358a4e-5ef0-11e6-a795-080027d0d606')
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index c1a7211..d283ab3 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -14,12 +14,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import io
 import random
 
-import six
-
 from oslo_log import log as logging
 from tempest.api.image import base
+from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
@@ -68,6 +68,23 @@
         self.assertEqual('queued', image['status'])
         return image
 
+    def _require_import_method(self, method):
+        if method not in self.available_import_methods:
+            raise self.skipException('Server does not support '
+                                     '%s import method' % method)
+
+    def _stage_and_check(self):
+        image = self._create_image()
+        # Stage image data
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.stage_image_file(image['id'], image_file)
+        # Check image status is 'uploading'
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual('uploading', body['status'])
+        return image['id']
+
     @decorators.idempotent_id('32ca0c20-e16f-44ac-8590-07869c9b4cc2')
     def test_image_glance_direct_import(self):
         """Test 'glance-direct' import functionalities
@@ -75,21 +92,27 @@
         Create image, stage image data, import image and verify
         that import succeeded.
         """
-        if 'glance-direct' not in self.available_import_methods:
-            raise self.skipException('Server does not support '
-                                     'glance-direct import method')
-        image = self._create_image()
-        # Stage image data
-        file_content = data_utils.random_bytes()
-        image_file = six.BytesIO(file_content)
-        self.client.stage_image_file(image['id'], image_file)
-        # Check image status is 'uploading'
-        body = self.client.show_image(image['id'])
-        self.assertEqual(image['id'], body['id'])
-        self.assertEqual('uploading', body['status'])
+        self._require_import_method('glance-direct')
+
+        image_id = self._stage_and_check()
         # import image from staging to backend
-        self.client.image_import(image['id'], method='glance-direct')
-        self.client.wait_for_resource_activation(image['id'])
+        resp = self.client.image_import(image_id, method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.client, image_id)
+
+        if not self.versions_client.has_version('2.12'):
+            # API is not new enough to support image/tasks API
+            LOG.info('Glance does not support v2.12, so I am unable to '
+                     'validate the image/tasks API.')
+            return
+
+        tasks = waiters.wait_for_image_tasks_status(
+            self.client, image_id, 'success')
+        self.assertEqual(1, len(tasks))
+        task = tasks[0]
+        self.assertEqual(resp.response['x-openstack-request-id'],
+                         task['request_id'])
+        self.assertEqual('glance-direct',
+                         task['input']['import_req']['method']['name'])
 
     @decorators.idempotent_id('f6feb7a4-b04f-4706-a011-206129f83e62')
     def test_image_web_download_import(self):
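The rewritten glance-direct test above additionally cross-checks the import against the image tasks API whenever Glance advertises API version 2.12 or newer, matching the task's request_id to the x-openstack-request-id header returned by the import call. A minimal sketch of that validation, assuming a show_image_tasks(image_id) call that returns {'tasks': [...]} as the waiter used here expects::

    def check_import_task(client, image_id, import_request_id):
        # Sketch only: verify the import produced exactly one successful task
        # whose request id and method match the original import request.
        tasks = client.show_image_tasks(image_id)['tasks']
        success = [t for t in tasks if t['status'] == 'success']
        assert len(success) == 1
        task = success[0]
        assert task['request_id'] == import_request_id
        assert task['input']['import_req']['method']['name'] == 'glance-direct'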
@@ -98,9 +121,8 @@
         Create image, import image and verify that import
         succeeded.
         """
-        if 'web-download' not in self.available_import_methods:
-            raise self.skipException('Server does not support '
-                                     'web-download import method')
+        self._require_import_method('web-download')
+
         image = self._create_image()
         # Now try to get image details
         body = self.client.show_image(image['id'])
@@ -110,7 +132,137 @@
         image_uri = CONF.image.http_image
         self.client.image_import(image['id'], method='web-download',
                                  image_uri=image_uri)
-        self.client.wait_for_resource_activation(image['id'])
+        waiters.wait_for_image_imported_to_stores(self.client, image['id'])
+
+    @decorators.idempotent_id('e04761a1-22af-42c2-b8bc-a34a3f12b585')
+    def test_remote_import(self):
+        """Test image import against a different worker than stage.
+
+        This creates and stages an image against the primary API worker,
+        but then calls import on a secondary worker (if available) to
+        test that distributed image import works (i.e. proxies the import
+        request to the proper worker).
+        """
+        self._require_import_method('glance-direct')
+
+        if not CONF.image.alternate_image_endpoint:
+            raise self.skipException('No image_remote service to test '
+                                     'against')
+
+        image_id = self._stage_and_check()
+        # import image from staging to backend, but on the alternate worker
+        self.os_primary.image_client_remote.image_import(
+            image_id, method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.client, image_id)
+
+    @decorators.idempotent_id('44d60544-1524-42f7-8899-315301105dd8')
+    def test_remote_delete(self):
+        """Test image delete against a different worker than stage.
+
+        This creates and stages an image against the primary API worker,
+        but then calls delete on a secondary worker (if available) to
+        test that distributed image import works (i.e. proxies the delete
+        request to the proper worker).
+        """
+        self._require_import_method('glance-direct')
+
+        if not CONF.image.alternate_image_endpoint:
+            raise self.skipException('No image_remote service to test '
+                                     'against')
+
+        image_id = self._stage_and_check()
+        # delete the image, but via the alternate worker
+        self.os_primary.image_client_remote.delete_image(image_id)
+        self.client.wait_for_resource_deletion(image_id)
+
+
+class MultiStoresImportImagesTest(base.BaseV2ImageTest):
+    """Test importing image in multiple stores"""
+    @classmethod
+    def skip_checks(cls):
+        super(MultiStoresImportImagesTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(MultiStoresImportImagesTest, cls).resource_setup()
+        cls.available_import_methods = cls.client.info_import()[
+            'import-methods']['value']
+        if not cls.available_import_methods:
+            raise cls.skipException('Server does not support '
+                                    'any import method')
+
+        # NOTE(pdeore): Skip if the glance-direct import method and multistore
+        # are not enabled/configured, or if only one store is configured in
+        # the multiple stores setup.
+        cls.available_stores = cls.get_available_stores()
+        if ('glance-direct' not in cls.available_import_methods or
+                not len(cls.available_stores) > 1):
+            raise cls.skipException(
+                'Either the glance-direct import method is not present '
+                'in %s or no more than one store is '
+                'configured: %s' % (cls.available_import_methods,
+                                    cls.available_stores))
+
+    def _create_and_stage_image(self, all_stores=False):
+        """Create Image & stage image file for glance-direct import method."""
+        image_name = data_utils.rand_name('test-image')
+        container_format = CONF.image.container_formats[0]
+        disk_format = CONF.image.disk_formats[0]
+        image = self.create_image(name=image_name,
+                                  container_format=container_format,
+                                  disk_format=disk_format,
+                                  visibility='private')
+        self.assertEqual('queued', image['status'])
+
+        self.client.stage_image_file(
+            image['id'],
+            io.BytesIO(data_utils.random_bytes()))
+        # Check image status is 'uploading'
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual('uploading', body['status'])
+
+        if all_stores:
+            stores_list = ','.join([store['id']
+                                    for store in self.available_stores])
+        else:
+            stores = [store['id'] for store in self.available_stores]
+            stores_list = stores[::len(stores) - 1]
+
+        return body, stores_list
+
+    @decorators.idempotent_id('bf04ff00-3182-47cb-833a-f1c6767b47fd')
+    def test_glance_direct_import_image_to_all_stores(self):
+        """Test image is imported in all available stores
+
+        Create image, import image to all available stores using glance-direct
+        import method and verify that import succeeded.
+        """
+        image, stores = self._create_and_stage_image(all_stores=True)
+
+        self.client.image_import(
+            image['id'], method='glance-direct', all_stores=True)
+
+        waiters.wait_for_image_imported_to_stores(self.client,
+                                                  image['id'], stores)
+
+    @decorators.idempotent_id('82fb131a-dd2b-11ea-aec7-340286b6c574')
+    def test_glance_direct_import_image_to_specific_stores(self):
+        """Test image is imported in all available stores
+
+        Create image, import image to specified store(s) using glance-direct
+        import method and verify that import succeeded.
+        """
+        image, stores = self._create_and_stage_image()
+        self.client.image_import(image['id'], method='glance-direct',
+                                 stores=stores)
+
+        waiters.wait_for_image_imported_to_stores(self.client, image['id'],
+                                                  (','.join(stores)))
 
 
 class BasicOperationsImagesTest(base.BaseV2ImageTest):
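In the MultiStoresImportImagesTest above, _create_and_stage_image either joins every available store id into one comma-separated string (all_stores=True) or slices out just the first and last store ids to exercise a partial import. A small illustration of that selection logic, using hypothetical store names::

    available_stores = [{'id': 'ceph'}, {'id': 'file'}, {'id': 'swift'}]

    # all_stores=True: one comma-separated string covering every store
    all_stores_list = ','.join(s['id'] for s in available_stores)
    # -> 'ceph,file,swift'

    # all_stores=False: a list holding only the first and last store
    ids = [s['id'] for s in available_stores]
    subset = ids[::len(ids) - 1]
    # -> ['ceph', 'swift']

The specific-stores test then passes the list via the stores argument and joins it again when waiting, so the waiter sees a comma-separated string in both cases.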
@@ -150,7 +302,7 @@
 
         # Now try uploading an image file
         file_content = data_utils.random_bytes()
-        image_file = six.BytesIO(file_content)
+        image_file = io.BytesIO(file_content)
         self.client.store_image_file(image['id'], image_file)
 
         # Now try to get image details
@@ -227,7 +379,7 @@
 
         # Upload an image file
         content = data_utils.random_bytes()
-        image_file = six.BytesIO(content)
+        image_file = io.BytesIO(content)
         self.client.store_image_file(image['id'], image_file)
 
         # Deactivate image
@@ -277,7 +429,7 @@
         1024 and 4096
         """
         size = random.randint(1024, 4096)
-        image_file = six.BytesIO(data_utils.random_bytes(size))
+        image_file = io.BytesIO(data_utils.random_bytes(size))
         tags = [data_utils.rand_name('tag'), data_utils.rand_name('tag')]
         image = cls.create_image(container_format=container_format,
                                  disk_format=disk_format,
@@ -312,7 +464,8 @@
         # Validate that the list was fetched sorted accordingly
         msg = 'No images were found that met the filter criteria.'
         self.assertNotEmpty(images_list, msg)
-        sorted_list = [image['size'] for image in images_list]
+        sorted_list = [image['size'] for image in images_list
+                       if image['size'] is not None]
         msg = 'The list of images was not sorted correctly.'
         self.assertEqual(sorted(sorted_list, reverse=desc), sorted_list, msg)
 
@@ -459,7 +612,7 @@
     def test_list_images_param_member_status(self):
         """Test listing images by member_status and visibility"""
         # Create an image to be shared using default visibility
-        image_file = six.BytesIO(data_utils.random_bytes(2048))
+        image_file = io.BytesIO(data_utils.random_bytes(2048))
         container_format = CONF.image.container_formats[0]
         disk_format = CONF.image.disk_formats[0]
         image = self.create_image(container_format=container_format,
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index dc2bb96..a3802a9 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -14,11 +14,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
 from tempest.api.image import base
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
+CONF = config.CONF
+
 
 class ImagesNegativeTest(base.BaseV2ImageTest):
 
@@ -114,3 +119,102 @@
         self.assertRaises(lib_exc.Forbidden,
                           self.client.delete_image,
                           image['id'])
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('a0ae75d4-ce9c-4576-b823-aba04c8acabd')
+    def test_update_image_reserved_property(self):
+        """Attempt to add a reserved property to an image.
+
+        Glance bans some internal-use-only properties such that they will
+        cause a PATCH to fail. Since os_glance_* is banned, we can use a
+        key in that namespace here.
+        """
+        if not CONF.image_feature_enabled.os_glance_reserved:
+            raise self.skipException('os_glance_reserved is not enabled')
+
+        image = self.create_image(name='test',
+                                  container_format='bare',
+                                  disk_format='raw')
+        self.assertRaises(lib_exc.Forbidden,
+                          self.client.update_image,
+                          image['id'], [{'add': '/os_glance_foo',
+                                         'value': 'bar'}])
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('e3fb7df8-2742-4143-8976-f1b26870f0a0')
+    def test_create_image_reserved_property(self):
+        """Attempt to create an image with a reserved property.
+
+        Glance bans some internal-use-only properties such that they will
+        cause an image create to fail. Since os_glance_* is banned, we can
+        use a key in that namespace here.
+        """
+        if not CONF.image_feature_enabled.os_glance_reserved:
+            raise self.skipException('os_glance_reserved is not enabled')
+
+        self.assertRaises(lib_exc.Forbidden,
+                          self.create_image,
+                          name='test',
+                          container_format='bare',
+                          disk_format='raw',
+                          os_glance_foo='bar')
+
+
+class ImportImagesNegativeTest(base.BaseV2ImageTest):
+    """Here we test the import operations for image"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(ImportImagesNegativeTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def resource_setup(cls):
+        super(ImportImagesNegativeTest, cls).resource_setup()
+        cls.available_import_methods = cls.client.info_import()[
+            'import-methods']['value']
+        if not cls.available_import_methods:
+            raise cls.skipException('Server does not support '
+                                    'any import method')
+
+        cls.available_stores = cls.get_available_stores()
+        if not len(cls.available_stores) > 0:
+            raise cls.skipException(
+                'No stores configured %s' % cls.available_stores)
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('c52f6a77-f522-4411-8dbe-9d14f2b1ba6b')
+    def test_image_web_download_import_with_bad_url(self):
+        """Test 'web-download' import functionalities
+
+        Make sure that web-download with invalid URL fails properly.
+        """
+        if 'web-download' not in self.available_import_methods:
+            raise self.skipException('Server does not support '
+                                     'web-download import method')
+        image = self.client.create_image(name='test',
+                                         container_format='bare',
+                                         disk_format='raw')
+        # Now try to get image details
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual('queued', body['status'])
+        stores = self.get_available_stores()
+        # import image from web to backend
+        image_uri = 'http://does-not.exist/no/possible/way'
+        self.client.image_import(image['id'], method='web-download',
+                                 image_uri=image_uri,
+                                 stores=[stores[0]['id']])
+
+        start_time = int(time.time())
+        while int(time.time()) - start_time < self.client.build_timeout:
+            body = self.client.show_image(image['id'])
+            if body.get('os_glance_failed_import'):
+                # Store ended up in failed list, which is good
+                return
+            time.sleep(self.client.build_interval)
+
+        self.fail('Image never reported failed store')
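The reserved-property tests above depend on Glance rejecting any JSON-patch operation that touches the os_glance_* namespace; update_image takes a list of RFC 6902-style operations. A hedged sketch of the expected behaviour, where client is assumed to be a v2 images client and the property names are purely illustrative::

    from tempest.lib import exceptions as lib_exc

    def try_reserved_property(client, image_id):
        # An ordinary custom property is accepted.
        client.update_image(image_id, [{'add': '/my_custom_key',
                                        'value': 'something'}])
        # A property in the reserved os_glance_* namespace is refused.
        try:
            client.update_image(image_id, [{'add': '/os_glance_foo',
                                            'value': 'bar'}])
        except lib_exc.Forbidden:
            pass  # expected when os_glance_reserved is enforced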
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
index ad7dfb3..a8dae7c 100644
--- a/tempest/api/network/admin/test_floating_ips_admin_actions.py
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -62,7 +62,7 @@
         This test performs below operations:
         1. Create couple floating ips for admin and non-admin users.
         2. Verify if admin can access all floating ips including other user
-           and non-admin user can only access its own floating ips.
+        and non-admin user can only access its own floating ips.
         """
         # Create floating ip from admin user
         floating_ip_admin = self.admin_floating_ips_client.create_floatingip(
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index ff5026b..905bf13 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
-
 from tempest.api.network import base
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
@@ -65,6 +63,8 @@
             name=data_utils.rand_name(self.__class__.__name__),
             allowed_address_pairs=allowed_address_pairs)
         port_id = body['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_id)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_id)
 
@@ -82,6 +82,8 @@
             network_id=self.network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
         port_id = body['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        port_id)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, port_id)
         if mac_address is None:
@@ -95,8 +97,7 @@
         body = self.ports_client.update_port(
             port_id, allowed_address_pairs=allowed_address_pairs)
         allowed_address_pair = body['port']['allowed_address_pairs']
-        six.assertCountEqual(self, allowed_address_pair,
-                             allowed_address_pairs)
+        self.assertCountEqual(allowed_address_pair, allowed_address_pairs)
 
     @decorators.idempotent_id('9599b337-272c-47fd-b3cf-509414414ac4')
     def test_update_port_with_address_pair(self):
@@ -115,6 +116,8 @@
             network_id=self.network['id'],
             name=data_utils.rand_name(self.__class__.__name__))
         newportid = resp['port']['id']
+        self.addCleanup(self.ports_client.wait_for_resource_deletion,
+                        newportid)
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.ports_client.delete_port, newportid)
         ipaddress = resp['port']['fixed_ips'][0]['ip_address']
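The allowed-address-pair tests above now register ports_client.wait_for_resource_deletion before the delete_port cleanup; since addCleanup callbacks run in LIFO order, the port is deleted first during teardown and only then does the wait execute. A self-contained illustration of that ordering using plain unittest (no Tempest pieces involved)::

    import unittest

    class CleanupOrderExample(unittest.TestCase):
        """Minimal demo that addCleanup callbacks run last-in, first-out."""

        def test_cleanup_order(self):
            events = []
            self.addCleanup(events.append, 'registered first, runs last')
            self.addCleanup(events.append, 'registered last, runs first')
            self.doCleanups()  # run cleanups now so the order can be checked
            self.assertEqual(['registered last, runs first',
                              'registered first, runs last'], events)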
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index c32d3c1..64f6e80 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -15,9 +15,9 @@
 
 from tempest.api.network import base
 from tempest.common import utils
-from tempest.common.utils import data_utils
 from tempest.common.utils import net_utils
 from tempest import config
+from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
@@ -66,7 +66,7 @@
         cls.create_router_interface(cls.router['id'], cls.subnet['id'])
         # Create two ports one each for Creation and Updating of floatingIP
         cls.ports = []
-        for i in range(2):
+        for _ in range(2):
             port = cls.create_port(cls.network)
             cls.ports.append(port)
 
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 7646b63..caaf964 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 import netaddr
-import six
 import testtools
 
 from tempest.api.network import base
@@ -595,9 +594,9 @@
         subnets = [sub['id'] for sub in body['subnets']
                    if sub['network_id'] == network['id']]
         test_subnet_ids = [sub['id'] for sub in (subnet1, subnet2)]
-        six.assertCountEqual(self, subnets,
-                             test_subnet_ids,
-                             'Subnet are not in the same network')
+        self.assertCountEqual(subnets,
+                              test_subnet_ids,
+                              'Subnet are not in the same network')
 
 
 class NetworksIpV6TestAttrs(BaseNetworkTestResources):
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 479578d..63078cd 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -16,7 +16,6 @@
 import ipaddress
 
 import netaddr
-import six
 import testtools
 
 from tempest.api.network import base_security_groups as sec_base
@@ -234,15 +233,15 @@
         # Get two IP addresses
         ip_address_1 = None
         ip_address_2 = None
-        ip_network = ipaddress.ip_network(six.text_type(subnet['cidr']))
+        ip_network = ipaddress.ip_network(str(subnet['cidr']))
         for ip in ip_network:
             if ip == ip_network.network_address:
                 continue
             if ip_address_1 is None:
-                ip_address_1 = six.text_type(ip)
+                ip_address_1 = str(ip)
             else:
                 ip_address_2 = ip_address_1
-                ip_address_1 = six.text_type(ip)
+                ip_address_1 = str(ip)
                 # Make sure these two IP addresses have different substring
                 if ip_address_1[:-1] != ip_address_2[:-1]:
                     break
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index e8f3f8b..478a834 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -13,12 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import time
-
 from tempest.common import custom_matchers
 from tempest import config
 from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
 from tempest.lib import exceptions as lib_exc
 import tempest.test
 
@@ -50,12 +47,11 @@
             _, objlist = container_client.list_container_objects(cont, params)
             # delete every object in the container
             for obj in objlist:
-                test_utils.call_and_ignore_notfound_exc(
-                    object_client.delete_object, cont, obj['name'])
-            # sleep 2 seconds to sync the deletion of the objects
-            # in HA deployment
-            time.sleep(2)
+                object_client.delete_object(cont, obj['name'])
+                object_client.wait_for_resource_deletion(obj['name'], cont)
+            # Delete the now-empty container and wait until it is gone
             container_client.delete_container(cont)
+            container_client.wait_for_resource_deletion(cont)
         except lib_exc.NotFound:
             pass
 
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index c5c30e3..4966ec4 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -14,8 +14,6 @@
 #    under the License.
 
 import random
-
-import six
 import testtools
 
 from tempest.api.object_storage import base
@@ -28,6 +26,7 @@
 
 
 class AccountTest(base.BaseObjectTest):
+    """Test account metadata and containers"""
 
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
@@ -42,7 +41,7 @@
     def resource_setup(cls):
         super(AccountTest, cls).resource_setup()
         for i in range(ord('a'), ord('f') + 1):
-            name = data_utils.rand_name(name='%s-' % six.int2byte(i))
+            name = data_utils.rand_name(name='%s-' % bytes((i,)))
             cls.container_client.update_container(name)
             cls.addClassResourceCleanup(base.delete_containers,
                                         [name],
@@ -54,23 +53,22 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('3499406a-ae53-4f8c-b43a-133d4dc6fe3f')
     def test_list_containers(self):
-        # list of all containers should not be empty
+        """Test listing containers"""
         resp, container_list = self.account_client.list_account_containers()
         self.assertHeaders(resp, 'Account', 'GET')
 
         self.assertIsNotNone(container_list)
 
         for container_name in self.containers:
-            self.assertIn(six.text_type(container_name).encode('utf-8'),
+            self.assertIn(str(container_name).encode('utf-8'),
                           container_list)
 
     @decorators.idempotent_id('884ec421-fbad-4fcc-916b-0580f2699565')
     def test_list_no_containers(self):
-        # List request to empty account
+        """Test listing containers for an account without container"""
 
         # To test listing no containers, create new user other than
         # the base user of this instance.
-
         resp, container_list = \
             self.os_operator.account_client.list_account_containers()
 
@@ -103,7 +101,7 @@
 
     @decorators.idempotent_id('1c7efa35-e8a2-4b0b-b5ff-862c7fd83704')
     def test_list_containers_with_format_json(self):
-        # list containers setting format parameter to 'json'
+        """Test listing containers setting format parameter to 'json'"""
         params = {'format': 'json'}
         resp, container_list = self.account_client.list_account_containers(
             params=params)
@@ -115,7 +113,7 @@
 
     @decorators.idempotent_id('4477b609-1ca6-4d4b-b25d-ad3f01086089')
     def test_list_containers_with_format_xml(self):
-        # list containers setting format parameter to 'xml'
+        """Test listing containers setting format parameter to 'xml'"""
         params = {'format': 'xml'}
         resp, container_list = self.account_client.list_account_containers(
             params=params)
@@ -133,13 +131,18 @@
         not CONF.object_storage_feature_enabled.discoverability,
         'Discoverability function is disabled')
     def test_list_extensions(self):
+        """Test listing capabilities"""
         resp = self.capabilities_client.list_capabilities()
 
         self.assertThat(resp, custom_matchers.AreAllWellFormatted())
 
     @decorators.idempotent_id('5cfa4ab2-4373-48dd-a41f-a532b12b08b2')
     def test_list_containers_with_limit(self):
-        # list containers one of them, half of them then all of them
+        """Test listing containers with limit parameter
+
+        Listing containers limited to one of them, half of them, and then all
+        of them.
+        """
         for limit in (1, self.containers_count // 2,
                       self.containers_count):
             params = {'limit': limit}
@@ -151,10 +154,11 @@
 
     @decorators.idempotent_id('638f876d-6a43-482a-bbb3-0840bca101c6')
     def test_list_containers_with_marker(self):
-        # list containers using marker param
-        # first expect to get 0 container as we specified last
-        # the container as marker
-        # second expect to get the bottom half of the containers
+        """Test listing containers with marker parameter
+
+        First expect to get 0 containers as we specified the last container
+        as marker, then expect to get the bottom half of the containers.
+        """
         params = {'marker': self.containers[-1]}
         resp, container_list = \
             self.account_client.list_account_containers(params=params)
@@ -172,10 +176,11 @@
 
     @decorators.idempotent_id('5ca164e4-7bde-43fa-bafb-913b53b9e786')
     def test_list_containers_with_end_marker(self):
-        # list containers using end_marker param
-        # first expect to get 0 container as we specified first container as
-        # end_marker
-        # second expect to get the top half of the containers
+        """Test listing containers with end_marker parameter
+
+        First expect to get 0 containers as we specified the first container
+        as end_marker, then expect to get the top half of the containers.
+        """
         params = {'end_marker': self.containers[0]}
         resp, container_list = \
             self.account_client.list_account_containers(params=params)
@@ -190,7 +195,12 @@
 
     @decorators.idempotent_id('ac8502c2-d4e4-4f68-85a6-40befea2ef5e')
     def test_list_containers_with_marker_and_end_marker(self):
-        # list containers combining marker and end_marker param
+        """Test listing containers with marker and end_marker parameter
+
+        If we use the first container as marker, and the last container as
+        end_marker, then we should get all containers excluding the first one
+        and the last one.
+        """
         params = {'marker': self.containers[0],
                   'end_marker': self.containers[self.containers_count - 1]}
         resp, container_list = self.account_client.list_account_containers(
@@ -200,8 +210,10 @@
 
     @decorators.idempotent_id('f7064ae8-dbcc-48da-b594-82feef6ea5af')
     def test_list_containers_with_limit_and_marker(self):
-        # list containers combining marker and limit param
-        # result are always limitated by the limit whatever the marker
+        """Test listing containers combining marker and limit parameter
+
+        Results are always limited by the limit, whatever the marker.
+        """
         for marker in random.choice(self.containers):
             limit = random.randint(0, self.containers_count - 1)
             params = {'marker': marker,
@@ -215,6 +227,10 @@
 
     @decorators.idempotent_id('888a3f0e-7214-4806-8e50-5e0c9a69bb5e')
     def test_list_containers_with_limit_and_end_marker(self):
+        """Test listing containers combining end_marker and limit parameter
+
+        Results are always limited by the limit, whatever the end_marker.
+        """
         # list containers combining limit and end_marker param
         limit = random.randint(1, self.containers_count)
         params = {'limit': limit,
@@ -227,7 +243,11 @@
 
     @decorators.idempotent_id('8cf98d9c-e3a0-4e44-971b-c87656fdddbd')
     def test_list_containers_with_limit_and_marker_and_end_marker(self):
-        # list containers combining limit, marker and end_marker param
+        """Test listing containers combining marker and end_marker and limit
+
+        Results are always limited by the limit, whatever the marker and
+        end_marker.
+        """
         limit = random.randint(1, self.containers_count)
         params = {'limit': limit,
                   'marker': self.containers[0],
@@ -240,7 +260,7 @@
 
     @decorators.idempotent_id('365e6fc7-1cfe-463b-a37c-8bd08d47b6aa')
     def test_list_containers_with_prefix(self):
-        # list containers that have a name that starts with a prefix
+        """Test listing containers that have a name starting with a prefix"""
         prefix = 'tempest-a'
         params = {'prefix': prefix}
         resp, container_list = self.account_client.list_account_containers(
@@ -252,7 +272,7 @@
 
     @decorators.idempotent_id('b1811cff-d1ed-4c15-a52e-efd8de41cf34')
     def test_list_containers_reverse_order(self):
-        # list containers in reverse order
+        """Test listing containers in reverse order"""
         _, orig_container_list = self.account_client.list_account_containers()
 
         params = {'reverse': True}
@@ -265,8 +285,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('4894c312-6056-4587-8d6f-86ffbf861f80')
     def test_list_account_metadata(self):
-        # list all account metadata
-
+        """Test listing account metadata"""
         # set metadata to account
         metadata = {'test-account-meta1': 'Meta1',
                     'test-account-meta2': 'Meta2'}
@@ -282,14 +301,14 @@
 
     @decorators.idempotent_id('b904c2e3-24c2-4dba-ad7d-04e90a761be5')
     def test_list_no_account_metadata(self):
-        # list no account metadata
+        """Test listing account metadata for account without metadata"""
         resp, _ = self.account_client.list_account_metadata()
         self.assertHeaders(resp, 'Account', 'HEAD')
         self.assertNotIn('x-account-meta-', str(resp))
 
     @decorators.idempotent_id('e2a08b5f-3115-4768-a3ee-d4287acd6c08')
     def test_update_account_metadata_with_create_metadata(self):
-        # add metadata to account
+        """Test adding metadata to account"""
         metadata = {'test-account-meta1': 'Meta1'}
         resp, _ = self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata)
@@ -305,7 +324,7 @@
 
     @decorators.idempotent_id('9f60348d-c46f-4465-ae06-d51dbd470953')
     def test_update_account_metadata_with_delete_metadata(self):
-        # delete metadata from account
+        """Test deleting metadata from account"""
         metadata = {'test-account-meta1': 'Meta1'}
         self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata)
@@ -318,8 +337,11 @@
 
     @decorators.idempotent_id('64fd53f3-adbd-4639-af54-436e4982dbfb')
     def test_update_account_metadata_with_create_metadata_key(self):
-        # if the value of metadata is not set, the metadata is not
-        # registered at a server
+        """Test adding metadata to account with empty value
+
+        When metadata with an empty value is added to the account, the
+        metadata is not registered.
+        """
         metadata = {'test-account-meta1': ''}
         resp, _ = self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata)
@@ -330,8 +352,11 @@
 
     @decorators.idempotent_id('d4d884d3-4696-4b85-bc98-4f57c4dd2bf1')
     def test_update_account_metadata_with_delete_metadata_key(self):
-        # Although the value of metadata is not set, the feature of
-        # deleting metadata is valid
+        """Test deleting metadata from account with empty value
+
+        Although the metadata value is not set, deleting metadata is still
+        valid, so the metadata is removed from the account.
+        """
         metadata_1 = {'test-account-meta1': 'Meta1'}
         self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata_1)
@@ -345,7 +370,11 @@
 
     @decorators.idempotent_id('8e5fc073-59b9-42ee-984a-29ed11b2c749')
     def test_update_account_metadata_with_create_and_delete_metadata(self):
-        # Send a request adding and deleting metadata requests simultaneously
+        """Test adding and deleting metadata simultaneously
+
+        Send a single request that both adds and deletes metadata; both
+        the addition and the deletion of metadata will succeed.
+        """
         metadata_1 = {'test-account-meta1': 'Meta1'}
         self.account_client.create_update_or_delete_account_metadata(
             create_update_metadata=metadata_1)
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index bdcb5ae..276b826 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 import time
+from urllib import parse as urlparse
 
-from six.moves.urllib import parse as urlparse
 import testtools
 
 from tempest.api.object_storage import base
@@ -33,6 +33,8 @@
 
 
 class ContainerSyncTest(base.BaseObjectTest):
+    """Test container synchronization"""
+
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
 
@@ -90,7 +92,7 @@
             # create object in container
             object_name = data_utils.rand_name(name='TestSyncObject')
             data = object_name[::-1].encode()  # Raw data, we need bytes
-            resp, _ = obj_client[0].create_object(cont[0], object_name, data)
+            obj_client[0].create_object(cont[0], object_name, data)
             self.objects.append(object_name)
 
         # wait until container contents list is not empty
@@ -129,6 +131,7 @@
         not CONF.object_storage_feature_enabled.container_sync,
         'Old-style container sync function is disabled')
     def test_container_synchronization(self):
+        """Test container synchronization"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
             client_proxy_ip = \
diff --git a/tempest/api/object_storage/test_container_sync_middleware.py b/tempest/api/object_storage/test_container_sync_middleware.py
index e77b079..db6cfa4 100644
--- a/tempest/api/object_storage/test_container_sync_middleware.py
+++ b/tempest/api/object_storage/test_container_sync_middleware.py
@@ -27,6 +27,7 @@
 
 
 class ContainerSyncMiddlewareTest(test_container_sync.ContainerSyncTest):
+    """Test containers synchronization specifying realm and cluster"""
 
     @classmethod
     def resource_setup(cls):
@@ -41,6 +42,7 @@
     @decorators.idempotent_id('ea4645a1-d147-4976-82f7-e5a7a3065f80')
     @utils.requires_ext(extension='container_sync', service='object')
     def test_container_synchronization(self):
+        """Test container synchronization specifying realm and cluster"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
             account_name = cont_client.base_url.split('/')[-1]
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index 1567e06..365dc78 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -19,6 +19,7 @@
 
 
 class CrossdomainTest(base.BaseObjectTest):
+    """Test crossdomain policy"""
 
     @classmethod
     def resource_setup(cls):
@@ -31,12 +32,10 @@
 
         cls.xml_end = "</cross-domain-policy>"
 
-    def setUp(self):
-        super(CrossdomainTest, self).setUp()
-
     @decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
     @utils.requires_ext(extension='crossdomain', service='object')
     def test_get_crossdomain_policy(self):
+        """Test getting crossdomain policy"""
         url = self.account_client._get_base_version_url() + "crossdomain.xml"
         resp, body = self.account_client.raw_request(url, "GET")
         self.account_client._error_checker(resp, body)
diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
index f5e2443..d4a6a9f2 100644
--- a/tempest/api/object_storage/test_healthcheck.py
+++ b/tempest/api/object_storage/test_healthcheck.py
@@ -21,9 +21,6 @@
 class HealthcheckTest(base.BaseObjectTest):
     """Test healthcheck"""
 
-    def setUp(self):
-        super(HealthcheckTest, self).setUp()
-
     @decorators.idempotent_id('db5723b1-f25c-49a9-bfeb-7b5640caf337')
     def test_get_healthcheck(self):
         """Test getting healthcheck"""
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index 86f7c8c..6f6e32f 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -21,6 +21,8 @@
 
 
 class ObjectExpiryTest(base.BaseObjectTest):
+    """Test object expiry"""
+
     @classmethod
     def resource_setup(cls):
         super(ObjectExpiryTest, cls).resource_setup()
@@ -83,6 +85,7 @@
 
     @decorators.idempotent_id('fb024a42-37f3-4ba5-9684-4f40a7910b41')
     def test_get_object_after_expiry_time(self):
+        """Test object is expired after x-delete-after time"""
         # the 10s is important, because the get calls can take 3s each
         # some times
         metadata = {'X-Delete-After': '10'}
@@ -90,5 +93,6 @@
 
     @decorators.idempotent_id('e592f18d-679c-48fe-9e36-4be5f47102c5')
     def test_get_object_at_expiry_time(self):
+        """Test object is expired at x-delete-at time"""
         metadata = {'X-Delete-At': str(int(time.time()) + 10)}
         self._test_object_expiry(metadata)
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index d857d3b..39e895e 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -15,8 +15,7 @@
 import hashlib
 import hmac
 import time
-
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from tempest.api.object_storage import base
 from tempest.common import utils
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index 0499eef..971a223 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -15,8 +15,7 @@
 import hashlib
 import hmac
 import time
-
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from tempest.api.object_storage import base
 from tempest.common import utils
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index acb578d..93f6fdb 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -13,12 +13,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import hashlib
 import random
 import re
 import time
 import zlib
 
+from oslo_utils.secretutils import md5
 from tempest.api.object_storage import base
 from tempest.common import custom_matchers
 from tempest import config
@@ -29,6 +29,7 @@
 
 
 class ObjectTest(base.BaseObjectTest):
+    """Test storage object"""
 
     @classmethod
     def resource_setup(cls):
@@ -78,6 +79,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('5b4ce26f-3545-46c9-a2ba-5754358a4c62')
     def test_create_object(self):
+        """Test creating object and checking the object's uploaded content"""
         # create object
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
@@ -97,7 +99,7 @@
 
     @decorators.idempotent_id('5daebb1d-f0d5-4dc9-b541-69672eff00b0')
     def test_create_object_with_content_disposition(self):
-        # create object with content_disposition
+        """Test creating object with content-disposition"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {}
@@ -119,7 +121,7 @@
 
     @decorators.idempotent_id('605f8317-f945-4bee-ae91-013f1da8f0a0')
     def test_create_object_with_content_encoding(self):
-        # create object with content_encoding
+        """Test creating object with content-encoding"""
         object_name = data_utils.rand_name(name='TestObject')
 
         # put compressed string
@@ -146,11 +148,11 @@
 
     @decorators.idempotent_id('73820093-0503-40b1-a478-edf0e69c7d1f')
     def test_create_object_with_etag(self):
-        # create object with etag
+        """Test creating object with Etag"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
-        md5 = hashlib.md5(data).hexdigest()
-        metadata = {'Etag': md5}
+        create_md5 = md5(data, usedforsecurity=False).hexdigest()
+        metadata = {'Etag': create_md5}
         resp, _ = self.object_client.create_object(
             self.container_name,
             object_name,
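The Etag test above now builds its checksum through oslo_utils.secretutils.md5 instead of hashlib.md5, so the call can be flagged with usedforsecurity=False and keep working on FIPS-enabled hosts. A minimal sketch of computing an object Etag this way (the payload is illustrative)::

    from oslo_utils.secretutils import md5

    data = b'example payload'
    etag = md5(data, usedforsecurity=False).hexdigest()
    headers = {'Etag': etag}
    # Swift compares the uploaded body against this checksum server-side.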
@@ -165,8 +167,7 @@
 
     @decorators.idempotent_id('84dafe57-9666-4f6d-84c8-0814d37923b8')
     def test_create_object_with_expect_continue(self):
-        # create object with expect_continue
-
+        """Test creating object with expect_continue"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
 
@@ -181,8 +182,9 @@
         self.assertEqual(data, body)
 
     @decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
+    @decorators.skip_because(bug='1905432')
     def test_create_object_with_transfer_encoding(self):
-        # create object with transfer_encoding
+        """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(1024)
         headers = {'Transfer-Encoding': 'chunked'}
@@ -202,7 +204,10 @@
 
     @decorators.idempotent_id('0f3d62a6-47e3-4554-b0e5-1a5dc372d501')
     def test_create_object_with_x_fresh_metadata(self):
-        # create object with x_fresh_metadata
+        """Test creating object with x-fresh-metadata
+
+        The previously added metadata will be cleared.
+        """
         object_name_base = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata_1 = {'X-Object-Meta-test-meta': 'Meta'}
@@ -228,7 +233,7 @@
 
     @decorators.idempotent_id('1c7ed3e4-2099-406b-b843-5301d4811baf')
     def test_create_object_with_x_object_meta(self):
-        # create object with object_meta
+        """Test creating object with x-object-meta"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -247,7 +252,7 @@
 
     @decorators.idempotent_id('e4183917-33db-4153-85cc-4dacbb938865')
     def test_create_object_with_x_object_metakey(self):
-        # create object with the blank value of metadata
+        """Test creating object with the blank value of metadata"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {'X-Object-Meta-test-meta': ''}
@@ -266,7 +271,10 @@
 
     @decorators.idempotent_id('ce798afc-b278-45de-a5ce-2ea124b98b99')
     def test_create_object_with_x_remove_object_meta(self):
-        # create object with x_remove_object_meta
+        """Test creating object with x-remove-object-meta
+
+        The metadata will be removed from the object.
+        """
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
@@ -289,7 +297,11 @@
 
     @decorators.idempotent_id('ad21e342-7916-4f9e-ab62-a1f885f2aaf9')
     def test_create_object_with_x_remove_object_metakey(self):
-        # create object with the blank value of remove metadata
+        """Test creating object with the blank value of remove metadata
+
+        With the blank metadata 'X-Remove-Object-Meta-test-meta' set, the
+        metadata 'x-object-meta-test-meta' will be removed from the object.
+        """
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
@@ -312,7 +324,7 @@
 
     @decorators.idempotent_id('17738d45-03bd-4d45-9e0b-7b2f58f98687')
     def test_delete_object(self):
-        # create object
+        """Test deleting object"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         resp, _ = self.object_client.create_object(self.container_name,
@@ -325,7 +337,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('7a94c25d-66e6-434c-9c38-97d4e2c29945')
     def test_update_object_metadata(self):
-        # update object metadata
+        """Test updating object metadata"""
         object_name, _ = self.create_object(self.container_name)
 
         metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -343,7 +355,7 @@
 
     @decorators.idempotent_id('48650ed0-c189-4e1e-ad6b-1d4770c6e134')
     def test_update_object_metadata_with_remove_metadata(self):
-        # update object metadata with remove metadata
+        """Test updating object metadata with remove metadata"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
@@ -366,6 +378,11 @@
 
     @decorators.idempotent_id('f726174b-2ded-4708-bff7-729d12ce1f84')
     def test_update_object_metadata_with_create_and_remove_metadata(self):
+        """Test updating object with creation and deletion of metadata
+
+        Update an object, creating and deleting metadata in a single
+        request; both operations will succeed.
+        """
         # creation and deletion of metadata with one request
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
@@ -392,8 +409,7 @@
 
     @decorators.idempotent_id('08854588-6449-4bb7-8cca-f2e1040f5e6f')
     def test_update_object_metadata_with_x_object_manifest(self):
-        # update object metadata with x_object_manifest
-
+        """Test updating object metadata with x_object_manifest"""
         # uploading segments
         object_name, _ = self._upload_segments()
         # creating a manifest file
@@ -418,7 +434,7 @@
 
     @decorators.idempotent_id('0dbbe89c-6811-4d84-a2df-eca2bdd40c0e')
     def test_update_object_metadata_with_x_object_metakey(self):
-        # update object metadata with a blank value of metadata
+        """Test updating object metadata with a blank value of metadata"""
         object_name, _ = self.create_object(self.container_name)
 
         update_metadata = {'X-Object-Meta-test-meta': ''}
@@ -436,7 +452,7 @@
 
     @decorators.idempotent_id('9a88dca4-b684-425b-806f-306cd0e57e42')
     def test_update_object_metadata_with_x_remove_object_metakey(self):
-        # update object metadata with a blank value of remove metadata
+        """Test updating object metadata with blank remove metadata value"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.arbitrary_string()
         create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -460,7 +476,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('9a447cf6-de06-48de-8226-a8c6ed31caf2')
     def test_list_object_metadata(self):
-        # get object metadata
+        """Test listing object metadata"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -478,7 +494,7 @@
 
     @decorators.idempotent_id('170fb90e-f5c3-4b1f-ae1b-a18810821172')
     def test_list_no_object_metadata(self):
-        # get empty list of object metadata
+        """Test listing object metadata for object without metadata"""
         object_name, _ = self.create_object(self.container_name)
 
         resp, _ = self.object_client.list_object_metadata(
@@ -489,8 +505,7 @@
 
     @decorators.idempotent_id('23a3674c-d6de-46c3-86af-ff92bfc8a3da')
     def test_list_object_metadata_with_x_object_manifest(self):
-        # get object metadata with x_object_manifest
-
+        """Test getting object metadata with x_object_manifest"""
         # uploading segments
         object_name, _ = self._upload_segments()
         # creating a manifest file
@@ -530,7 +545,7 @@
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('02610ba7-86b7-4272-9ed8-aa8d417cb3cd')
     def test_get_object(self):
-        # retrieve object's data (in response body)
+        """Test retrieving object's data (in response body)"""
 
         # create object
         object_name, data = self.create_object(self.container_name)
@@ -543,7 +558,7 @@
 
     @decorators.idempotent_id('005f9bf6-e06d-41ec-968e-96c78e0b1d82')
     def test_get_object_with_metadata(self):
-        # get object with metadata
+        """Test getting object with metadata"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -562,7 +577,7 @@
 
     @decorators.idempotent_id('05a1890e-7db9-4a6c-90a8-ce998a2bddfa')
     def test_get_object_with_range(self):
-        # get object with range
+        """Test getting object with range"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(100)
         self.object_client.create_object(self.container_name,
@@ -580,7 +595,7 @@
 
     @decorators.idempotent_id('11b4515b-7ba7-4ca8-8838-357ded86fc10')
     def test_get_object_with_x_object_manifest(self):
-        # get object with x_object_manifest
+        """Test getting object with x_object_manifest"""
 
         # uploading segments
         object_name, data_segments = self._upload_segments()
@@ -623,10 +638,10 @@
 
     @decorators.idempotent_id('c05b4013-e4de-47af-be84-e598062b16fc')
     def test_get_object_with_if_match(self):
-        # get object with if_match
+        """Test getting object with if_match"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(10)
-        create_md5 = hashlib.md5(data).hexdigest()
+        create_md5 = md5(data, usedforsecurity=False).hexdigest()
         create_metadata = {'Etag': create_md5}
         self.object_client.create_object(self.container_name,
                                          object_name,
@@ -643,7 +658,7 @@
 
     @decorators.idempotent_id('be133639-e5d2-4313-9b1f-2d59fc054a16')
     def test_get_object_with_if_modified_since(self):
-        # get object with if_modified_since
+        """Test getting object with if_modified_since"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
         time_now = time.time()
@@ -663,10 +678,10 @@
 
     @decorators.idempotent_id('641500d5-1612-4042-a04d-01fc4528bc30')
     def test_get_object_with_if_none_match(self):
-        # get object with if_none_match
+        """Test getting object with if_none_match"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes()
-        create_md5 = hashlib.md5(data).hexdigest()
+        create_md5 = md5(data, usedforsecurity=False).hexdigest()
         create_metadata = {'Etag': create_md5}
         self.object_client.create_object(self.container_name,
                                          object_name,
@@ -674,7 +689,7 @@
                                          metadata=create_metadata)
 
         list_data = data_utils.random_bytes()
-        list_md5 = hashlib.md5(list_data).hexdigest()
+        list_md5 = md5(list_data, usedforsecurity=False).hexdigest()
         list_metadata = {'If-None-Match': list_md5}
         resp, body = self.object_client.get_object(
             self.container_name,
@@ -685,7 +700,7 @@
 
     @decorators.idempotent_id('0aa1201c-10aa-467a-bee7-63cbdd463152')
     def test_get_object_with_if_unmodified_since(self):
-        # get object with if_unmodified_since
+        """Test getting object with if_unmodified_since"""
         object_name, data = self.create_object(self.container_name)
 
         time_now = time.time()
@@ -700,7 +715,7 @@
 
     @decorators.idempotent_id('94587078-475f-48f9-a40f-389c246e31cd')
     def test_get_object_with_x_newest(self):
-        # get object with x_newest
+        """Test getting object with x_newest"""
         object_name, data = self.create_object(self.container_name)
 
         list_metadata = {'X-Newest': 'true'}
@@ -713,6 +728,7 @@
 
     @decorators.idempotent_id('1a9ab572-1b66-4981-8c21-416e2a5e6011')
     def test_copy_object_in_same_container(self):
+        """Test copying object to another object in same container"""
         # create source object
         src_object_name = data_utils.rand_name(name='SrcObject')
         src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
@@ -742,7 +758,7 @@
 
     @decorators.idempotent_id('2248abba-415d-410b-9c30-22dff9cd6e67')
     def test_copy_object_to_itself(self):
-        # change the content type of an existing object
+        """Test changing the content type of an existing object"""
 
         # create object
         object_name, _ = self.create_object(self.container_name)
@@ -755,11 +771,11 @@
         headers = {}
         headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
                                             str(object_name))
-        resp, body = self.object_client.create_object(self.container_name,
-                                                      object_name,
-                                                      data=None,
-                                                      metadata=metadata,
-                                                      headers=headers)
+        resp, _ = self.object_client.create_object(self.container_name,
+                                                   object_name,
+                                                   data=None,
+                                                   metadata=metadata,
+                                                   headers=headers)
         self.assertHeaders(resp, 'Object', 'PUT')
 
         # check the content type
@@ -769,6 +785,7 @@
 
     @decorators.idempotent_id('06f90388-2d0e-40aa-934c-e9a8833e958a')
     def test_copy_object_2d_way(self):
+        """Test copying object's data to the new object using COPY"""
         # create source object
         src_object_name = data_utils.rand_name(name='SrcObject')
         src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
@@ -793,6 +810,7 @@
 
     @decorators.idempotent_id('aa467252-44f3-472a-b5ae-5b57c3c9c147')
     def test_copy_object_across_containers(self):
+        """Test copying object to another container"""
         # create a container to use as a source container
         src_container_name = data_utils.rand_name(name='TestSourceContainer')
         self.container_client.update_container(src_container_name)
@@ -837,6 +855,7 @@
 
     @decorators.idempotent_id('5a9e2cc6-85b6-46fc-916d-0cbb7a88e5fd')
     def test_copy_object_with_x_fresh_metadata(self):
+        """Test copying objectwith x_fresh_metadata"""
         # create source object
         metadata = {'x-object-meta-src': 'src_value'}
         src_object_name, data = self.create_object(self.container_name,
@@ -858,6 +877,7 @@
 
     @decorators.idempotent_id('a28a8b99-e701-4d7e-9d84-3b66f121460b')
     def test_copy_object_with_x_object_metakey(self):
+        """Test copying object with x_object_metakey"""
         # create source object
         metadata = {'x-object-meta-src': 'src_value'}
         src_obj_name, data = self.create_object(self.container_name,
@@ -881,6 +901,7 @@
 
     @decorators.idempotent_id('edabedca-24c3-4322-9b70-d6d9f942a074')
     def test_copy_object_with_x_object_meta(self):
+        """Test copying object with x_object_meta"""
         # create source object
         metadata = {'x-object-meta-src': 'src_value'}
         src_obj_name, data = self.create_object(self.container_name,
@@ -904,6 +925,7 @@
 
     @decorators.idempotent_id('e3e6a64a-9f50-4955-b987-6ce6767c97fb')
     def test_object_upload_in_segments(self):
+        """Test uploading object in segments"""
         # create object
         object_name = data_utils.rand_name(name='LObject')
         data = data_utils.arbitrary_string()
@@ -947,14 +969,17 @@
 
     @decorators.idempotent_id('50d01f12-526f-4360-9ac2-75dd508d7b68')
     def test_get_object_if_different(self):
-        # http://en.wikipedia.org/wiki/HTTP_ETag
-        # Make a conditional request for an object using the If-None-Match
-        # header, it should get downloaded only if the local file is different,
-        # otherwise the response code should be 304 Not Modified
+        """Test getting object content only when the local file is different
+
+        http://en.wikipedia.org/wiki/HTTP_ETag
+        Make a conditional request for an object using the If-None-Match
+        header, it should get downloaded only if the local file is different,
+        otherwise the response code should be 304 Not Modified
+        """
         object_name, data = self.create_object(self.container_name)
         # local copy is identical, no download
-        md5 = hashlib.md5(data).hexdigest()
-        headers = {'If-None-Match': md5}
+        object_md5 = md5(data, usedforsecurity=False).hexdigest()
+        headers = {'If-None-Match': object_md5}
         url = "%s/%s" % (self.container_name, object_name)
         resp, _ = self.object_client.get(url, headers=headers)
         self.assertEqual(resp['status'], '304')
@@ -968,13 +993,14 @@
 
         # local copy is different, download
         local_data = "something different"
-        md5 = hashlib.md5(local_data.encode()).hexdigest()
-        headers = {'If-None-Match': md5}
+        other_md5 = md5(local_data.encode(), usedforsecurity=False).hexdigest()
+        headers = {'If-None-Match': other_md5}
         resp, _ = self.object_client.get(url, headers=headers)
         self.assertHeaders(resp, 'Object', 'GET')
 
 
 class PublicObjectTest(base.BaseObjectTest):
+    """Test public storage object"""
 
     credentials = [['operator', CONF.object_storage.operator_role],
                    ['operator_alt', CONF.object_storage.operator_role]]
@@ -1000,9 +1026,11 @@
 
     @decorators.idempotent_id('07c9cf95-c0d4-4b49-b9c8-0ef2c9b27193')
     def test_access_public_container_object_without_using_creds(self):
-        # make container public-readable and access an object in it object
-        # anonymously, without using credentials
+        """Test accessing public container object without using credentials
 
+        Make the container public-readable and access an object in it
+        anonymously, without using credentials.
+        """
         # update container metadata to make it publicly readable
         cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
         resp_meta, body = (
@@ -1040,8 +1068,11 @@
 
     @decorators.idempotent_id('54e2a2fe-42dc-491b-8270-8e4217dd4cdc')
     def test_access_public_object_with_another_user_creds(self):
-        # make container public-readable and access an object in it using
-        # another user's credentials
+        """Test accessing public object with another user's credentials
+
+        Make container public-readable and access an object in it using
+        another user's credentials.
+        """
         cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
         resp_meta, body = (
             self.container_client.create_update_or_delete_container_metadata(
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index 8bb2e6e..0c84357 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -12,10 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import hashlib
-
 from oslo_serialization import jsonutils as json
 
+from oslo_utils.secretutils import md5
 from tempest.api.object_storage import base
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
@@ -27,6 +26,7 @@
 
 
 class ObjectSloTest(base.BaseObjectTest):
+    """Test static large object"""
 
     def setUp(self):
         super(ObjectSloTest, self).setUp()
@@ -69,10 +69,12 @@
         path_object_2 = '/%s/%s' % (self.container_name,
                                     object_name_base_2)
         data_manifest = [{'path': path_object_1,
-                          'etag': hashlib.md5(self.content).hexdigest(),
+                          'etag': md5(self.content,
+                                      usedforsecurity=False).hexdigest(),
                           'size_bytes': data_size},
                          {'path': path_object_2,
-                          'etag': hashlib.md5(self.content).hexdigest(),
+                          'etag': md5(self.content,
+                                      usedforsecurity=False).hexdigest(),
                           'size_bytes': data_size}]
 
         return json.dumps(data_manifest)
@@ -108,7 +110,7 @@
     @decorators.idempotent_id('2c3f24a6-36e8-4711-9aa2-800ee1fc7b5b')
     @utils.requires_ext(extension='slo', service='object')
     def test_upload_manifest(self):
-        # create static large object from multipart manifest
+        """Test creating static large object from multipart manifest"""
         manifest = self._create_manifest()
 
         params = {'multipart-manifest': 'put'}
@@ -123,7 +125,10 @@
     @decorators.idempotent_id('e69ad766-e1aa-44a2-bdd2-bf62c09c1456')
     @utils.requires_ext(extension='slo', service='object')
     def test_list_large_object_metadata(self):
-        # list static large object metadata using multipart manifest
+        """Test listing static large object metadata
+
+        List static large object metadata using multipart manifest
+        """
         object_name = self._create_large_object()
 
         resp, _ = self.object_client.list_object_metadata(
@@ -135,7 +140,7 @@
     @decorators.idempotent_id('49bc49bc-dd1b-4c0f-904e-d9f10b830ee8')
     @utils.requires_ext(extension='slo', service='object')
     def test_retrieve_large_object(self):
-        # list static large object using multipart manifest
+        """Test listing static large object using multipart manifest"""
         object_name = self._create_large_object()
 
         resp, body = self.object_client.get_object(
@@ -150,7 +155,7 @@
     @decorators.idempotent_id('87b6dfa1-abe9-404d-8bf0-6c3751e6aa77')
     @utils.requires_ext(extension='slo', service='object')
     def test_delete_large_object(self):
-        # delete static large object using multipart manifest
+        """Test deleting static large object using multipart manifest"""
         object_name = self._create_large_object()
 
         params_del = {'multipart-manifest': 'delete'}
@@ -161,6 +166,6 @@
 
         self.assertHeaders(resp, 'Object', 'DELETE')
 
-        resp, body = self.container_client.list_container_objects(
+        resp, _ = self.container_client.list_container_objects(
             self.container_name)
         self.assertEqual(int(resp['x-container-object-count']), 0)
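For reference, a minimal standalone sketch of the hashlib-to-oslo.utils md5 swap applied in the object storage tests above; it is not part of the patch and the payload bytes are made up. oslo_utils.secretutils.md5 accepts usedforsecurity so the checksum keeps working on FIPS-restricted interpreters.

    import hashlib

    from oslo_utils.secretutils import md5

    data = b'some object payload'

    # Old pattern removed by this patch: plain hashlib call.
    legacy_etag = hashlib.md5(data).hexdigest()

    # New pattern: identical digest, explicitly flagged as non-security use.
    etag = md5(data, usedforsecurity=False).hexdigest()

    assert etag == legacy_etag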
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index 29354b6..e75e22a 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -15,8 +15,7 @@
 import hashlib
 import hmac
 import time
-
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from tempest.api.object_storage import base
 from tempest.common import utils
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index bbb4827..4ad8428 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -15,8 +15,7 @@
 import hashlib
 import hmac
 import time
-
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from tempest.api.object_storage import base
 from tempest.common import utils
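A quick sketch of the six.moves.urllib to stdlib urllib switch made in the temp URL tests above, consistent with the wider six removal in this patch; the URL below is invented for illustration.

    from urllib import parse as urlparse

    url = ('https://swift.example.com/v1/AUTH_test/container/object'
           '?temp_url_sig=abc&temp_url_expires=1700000000')
    parsed = urlparse.urlparse(url)

    # Same helpers six.moves used to re-export, straight from the stdlib.
    print(parsed.path)
    print(urlparse.parse_qs(parsed.query))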
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
index 1351704..3c76eca 100644
--- a/tempest/api/volume/admin/test_backends_capabilities.py
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -20,6 +20,7 @@
 
 
 class BackendsCapabilitiesAdminTestsJSON(base.BaseVolumeAdminTest):
+    """Test backends capabilities"""
 
     @classmethod
     def resource_setup(cls):
@@ -32,14 +33,16 @@
 
     @decorators.idempotent_id('3750af44-5ea2-4cd4-bc3e-56e7e6caf854')
     def test_get_capabilities_backend(self):
-        # Test backend properties
+        """Test getting backend capabilities"""
         # Check response schema
         self.admin_capabilities_client.show_backend_capabilities(self.hosts[0])
 
     @decorators.idempotent_id('a9035743-d46a-47c5-9cb7-3c80ea16dea0')
     def test_compare_volume_stats_values(self):
-        # Test values comparison between show_backend_capabilities
-        # to show_pools
+        """Test comparing volume stats values
+
+        Compare volume stats between show_backend_capabilities and show_pools.
+        """
         VOLUME_STATS = ('vendor_name',
                         'volume_backend_name',
                         'storage_protocol')
diff --git a/tempest/api/volume/admin/test_encrypted_volumes_extend.py b/tempest/api/volume/admin/test_encrypted_volumes_extend.py
new file mode 100644
index 0000000..7339179
--- /dev/null
+++ b/tempest/api/volume/admin/test_encrypted_volumes_extend.py
@@ -0,0 +1,35 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import testtools
+
+from tempest.api.volume import base
+from tempest.api.volume import test_volumes_extend as extend
+from tempest.common import utils
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class EncryptedVolumesExtendAttachedTest(extend.BaseVolumesExtendAttachedTest,
+                                         base.BaseVolumeAdminTest):
+    """Tests extending the size of an attached encrypted volume."""
+
+    @decorators.idempotent_id('e93243ec-7c37-4b5b-a099-ebf052c13216')
+    @testtools.skipUnless(
+        CONF.volume_feature_enabled.extend_attached_encrypted_volume,
+        "Attached encrypted volume extend is disabled.")
+    @utils.services('compute')
+    def test_extend_attached_encrypted_volume_luksv1(self):
+        volume = self.create_encrypted_volume(encryption_provider="luks")
+        self._test_extend_attached_volume(volume)
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index c57766e..659e2c4 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -62,12 +62,25 @@
 
 
 class GroupSnapshotsTest(BaseGroupSnapshotsTest):
-    _api_version = 3
+    """Test group snapshot"""
+
     min_microversion = '3.14'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
     def test_group_snapshot_create_show_list_delete(self):
+        """Test create/show/list/delete group snapshot
+
+        1. Create volume type "volume_type1"
+        2. Create group type "group_type1"
+        3. Create group "group1" with "group_type1" and "volume_type1"
+        4. Create volume "volume1" with "volume_type1" and "group1"
+        5. Create group snapshot "group_snapshot1" with "group1"
+        6. Check snapshot created from "volume1" reaches available status
+        7. Check the created group snapshot "group_snapshot1" is in the list
+           of all group snapshots
+        8. Delete group snapshot "group_snapshot1"
+        """
         # Create volume type
         volume_type = self.create_volume_type()
 
@@ -118,6 +131,18 @@
 
     @decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
     def test_create_group_from_group_snapshot(self):
+        """Test creating group from group snapshot
+
+        1. Create volume type "volume_type1"
+        2. Create group type "group_type1"
+        3. Create group "group1" with "group_type1" and "volume_type1"
+        4. Create volume "volume1" with "volume_type1" and "group1"
+        5. Create group snapshot "group_snapshot1" with "group1"
+        6. Check snapshot created from "volume1" reaches available status
+        7. Create group "group2" from "group_snapshot1"
+        8. Check the volumes belonging to "group2" reach available status
+        9. Check "group2" reaches available status
+        """
         # Create volume type
         volume_type = self.create_volume_type()
 
@@ -161,6 +186,20 @@
     @decorators.idempotent_id('7d7fc000-0b4c-4376-a372-544116d2e127')
     @decorators.related_bug('1739031')
     def test_delete_group_snapshots_following_updated_volumes(self):
+        """Test deleting group snapshot following updated volumes
+
+        1. Create volume type "volume_type1"
+        2. Create group type "group_type1"
+        3. Create group "group1" with "group_type1" and "volume_type1"
+        4. Create 2 volumes "volume1" and "volume2"
+           with "volume_type1" and "group1"
+        5. Remove each created volume from "group1" and then add it back
+        6. Create group snapshot "group_snapshot1" with "group1"
+        7. Check snapshots created from "volume1" and "volume2" reach
+           available status
+        8. Delete "group_snapshot1"
+        9. Check snapshots created from "volume1" and "volume2" are deleted
+        """
         volume_type = self.create_volume_type()
 
         group_type = self.create_group_type()
@@ -211,13 +250,15 @@
 
 
 class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
-    _api_version = 3
+    """Test group snapshot with volume microversion greater than 3.18"""
+
     min_microversion = '3.19'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
     @decorators.skip_because(bug='1770179')
     def test_reset_group_snapshot_status(self):
+        """Test resetting group snapshot status to creating/available/error"""
         # Create volume type
         volume_type = self.create_volume_type()
 
diff --git a/tempest/api/volume/admin/test_group_type_specs.py b/tempest/api/volume/admin/test_group_type_specs.py
index c5e6d1a..5c5913e 100644
--- a/tempest/api/volume/admin/test_group_type_specs.py
+++ b/tempest/api/volume/admin/test_group_type_specs.py
@@ -19,12 +19,14 @@
 
 
 class GroupTypeSpecsTest(base.BaseVolumeAdminTest):
-    _api_version = 3
+    """Test group type specs"""
+
     min_microversion = '3.11'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('bb4e30d0-de6e-4f4d-866c-dcc48d023b4e')
     def test_group_type_specs_create_show_update_list_delete(self):
+        """Test create/show/update/list/delete group type specs"""
         # Create new group type
         group_type = self.create_group_type()
 
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index 6723207..a7a5d6f 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -19,13 +19,14 @@
 
 
 class GroupTypesTest(base.BaseVolumeAdminTest):
-    _api_version = 3
+    """Test group types"""
+
     min_microversion = '3.11'
     max_microversion = 'latest'
 
     @decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
     def test_group_type_create_list_update_show(self):
-        # Create/list/show group type.
+        """Test create/list/update/show group type"""
         name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
         description = data_utils.rand_name("group-type-description")
         group_specs = {"consistent_group_snapshot_enabled": "<is> False"}
diff --git a/tempest/api/volume/admin/test_groups.py b/tempest/api/volume/admin/test_groups.py
index e67b985..747a194 100644
--- a/tempest/api/volume/admin/test_groups.py
+++ b/tempest/api/volume/admin/test_groups.py
@@ -25,7 +25,6 @@
 class GroupsTest(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.12"""
 
-    _api_version = 3
     min_microversion = '3.13'
     max_microversion = 'latest'
 
@@ -156,7 +155,6 @@
 class GroupsV314Test(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.13"""
 
-    _api_version = 3
     min_microversion = '3.14'
     max_microversion = 'latest'
 
@@ -194,7 +192,6 @@
 class GroupsV320Test(base.BaseVolumeAdminTest):
     """Tests of volume groups with microversion greater than 3.19"""
 
-    _api_version = 3
     min_microversion = '3.20'
     max_microversion = 'latest'
 
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index c5c70d2..83733bd 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -10,7 +10,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
 from tempest.api.volume import base
 from tempest.common import waiters
 from tempest import config
@@ -21,6 +20,7 @@
 
 
 class VolumeMultiBackendTest(base.BaseVolumeAdminTest):
+    """Test volume multi backends"""
 
     @classmethod
     def skip_checks(cls):
@@ -78,24 +78,49 @@
 
     @decorators.idempotent_id('c1a41f3f-9dad-493e-9f09-3ff197d477cc')
     def test_backend_name_reporting(self):
-        # get volume id which created by type without prefix
+        """Test backend name reporting for volume when type is without prefix
+
+        1. Create volume type, with 'volume_backend_name' as extra spec key
+        2. Create volume using the created volume type
+        3. Check 'os-vol-host-attr:host' of the volume info, the value should
+           contain '@' character, like 'cinder@CloveStorage#tecs_backend'
+        """
         for volume_id in self.volume_id_list_without_prefix:
             self._test_backend_name_reporting_by_volume_id(volume_id)
 
     @decorators.idempotent_id('f38e647f-ab42-4a31-a2e7-ca86a6485215')
     def test_backend_name_reporting_with_prefix(self):
-        # get volume id which created by type with prefix
+        """Test backend name reporting for volume when type is with prefix
+
+        1. Create volume type, with 'capabilities:volume_backend_name' as
+           extra spec key
+        2. Create volume using the created volume type
+        3. Check 'os-vol-host-attr:host' of the volume info, the value should
+           contain '@' character, like 'cinder@CloveStorage#tecs_backend'
+        """
         for volume_id in self.volume_id_list_with_prefix:
             self._test_backend_name_reporting_by_volume_id(volume_id)
 
     @decorators.idempotent_id('46435ab1-a0af-4401-8373-f14e66b0dd58')
     def test_backend_name_distinction(self):
-        # get volume ids which created by type without prefix
+        """Test volume backend distinction when type is without prefix
+
+        1. For each backend, create volume type with 'volume_backend_name'
+           as extra spec key
+        2. Create volumes using the created volume types
+        3. Check 'os-vol-host-attr:host' of each created volume is different.
+        """
         self._test_backend_name_distinction(self.volume_id_list_without_prefix)
 
     @decorators.idempotent_id('4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed')
     def test_backend_name_distinction_with_prefix(self):
-        # get volume ids which created by type without prefix
+        """Test volume backend distinction when type is with prefix
+
+        1. For each backend, create volume type with
+           'capabilities:volume_backend_name' as extra spec key
+        2. Create volumes using the created volume types
+        3. Check 'os-vol-host-attr:host' of each created volume is different.
+        """
         self._test_backend_name_distinction(self.volume_id_list_with_prefix)
 
     def _get_volume_host(self, volume_id):
@@ -123,4 +148,4 @@
         # assert that volumes are each created on separate hosts:
         msg = ("volumes %s were created in the same backend" % ", "
                .join(volume_hosts))
-        six.assertCountEqual(self, volume_hosts, set(volume_hosts), msg)
+        self.assertCountEqual(volume_hosts, set(volume_hosts), msg)
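A minimal sketch of the six.assertCountEqual to TestCase.assertCountEqual change shown above; the test class and host names are invented.

    import unittest


    class BackendDistinctionSketch(unittest.TestCase):
        def test_hosts_are_distinct(self):
            volume_hosts = ['host@backend1', 'host@backend2']
            # Passes only if every host appears exactly once, i.e. the list
            # has the same element counts as its own set (order ignored).
            self.assertCountEqual(volume_hosts, set(volume_hosts))


    if __name__ == '__main__':
        unittest.main()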
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index 41849bc..4fca240 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -22,6 +22,8 @@
 
 
 class SnapshotsActionsTest(base.BaseVolumeAdminTest):
+    """Test volume snapshot actions"""
+
     @classmethod
     def skip_checks(cls):
         super(SnapshotsActionsTest, cls).skip_checks()
@@ -65,7 +67,7 @@
 
     @decorators.idempotent_id('3e13ca2f-48ea-49f3-ae1a-488e9180d535')
     def test_reset_snapshot_status(self):
-        # Reset snapshot status to creating
+        """Test resetting snapshot status to creating"""
         status = 'creating'
         self.admin_snapshots_client.reset_snapshot_status(
             self.snapshot['id'], status)
@@ -74,6 +76,10 @@
 
     @decorators.idempotent_id('41288afd-d463-485e-8f6e-4eea159413eb')
     def test_update_snapshot_status(self):
+        """Test updating snapshot
+
+        Update snapshot status to 'error' and progress to '80%'.
+        """
         # Reset snapshot status to creating
         status = 'creating'
         self.admin_snapshots_client.reset_snapshot_status(
@@ -95,20 +101,20 @@
 
     @decorators.idempotent_id('05f711b6-e629-4895-8103-7ca069f2073a')
     def test_snapshot_force_delete_when_snapshot_is_creating(self):
-        # test force delete when status of snapshot is creating
+        """Test force delete when status of snapshot is creating"""
         self._create_reset_and_force_delete_temp_snapshot('creating')
 
     @decorators.idempotent_id('92ce8597-b992-43a1-8868-6316b22a969e')
     def test_snapshot_force_delete_when_snapshot_is_deleting(self):
-        # test force delete when status of snapshot is deleting
+        """Test force delete when status of snapshot is deleting"""
         self._create_reset_and_force_delete_temp_snapshot('deleting')
 
     @decorators.idempotent_id('645a4a67-a1eb-4e8e-a547-600abac1525d')
     def test_snapshot_force_delete_when_snapshot_is_error(self):
-        # test force delete when status of snapshot is error
+        """Test force delete when status of snapshot is error"""
         self._create_reset_and_force_delete_temp_snapshot('error')
 
     @decorators.idempotent_id('bf89080f-8129-465e-9327-b2f922666ba5')
     def test_snapshot_force_delete_when_snapshot_is_error_deleting(self):
-        # test force delete when status of snapshot is error_deleting
+        """Test force delete when status of snapshot is error_deleting"""
         self._create_reset_and_force_delete_temp_snapshot('error_deleting')
diff --git a/tempest/api/volume/admin/test_user_messages.py b/tempest/api/volume/admin/test_user_messages.py
index 096709c..768c129 100644
--- a/tempest/api/volume/admin/test_user_messages.py
+++ b/tempest/api/volume/admin/test_user_messages.py
@@ -24,7 +24,6 @@
 class UserMessagesTest(base.BaseVolumeAdminTest):
     """Test volume messages with microversion greater than 3.2"""
 
-    _api_version = 3
     min_microversion = '3.3'
     max_microversion = 'latest'
 
diff --git a/tempest/api/volume/admin/test_volume_quota_classes.py b/tempest/api/volume/admin/test_volume_quota_classes.py
index ee52354..f482788 100644
--- a/tempest/api/volume/admin/test_volume_quota_classes.py
+++ b/tempest/api/volume/admin/test_volume_quota_classes.py
@@ -30,6 +30,7 @@
 
 
 class VolumeQuotaClassesTest(base.BaseVolumeAdminTest):
+    """Test volume quota classes"""
 
     def setUp(self):
         # Note(jeremy.zhang): All test cases in this class need to externally
@@ -44,6 +45,7 @@
 
     @decorators.idempotent_id('abb9198e-67d0-4b09-859f-4f4a1418f176')
     def test_show_default_quota(self):
+        """Test showing default volume quota class set"""
         # response body is validated by schema
         default_quotas = self.admin_quota_classes_client.show_quota_class_set(
             'default')['quota_class_set']
@@ -51,6 +53,11 @@
 
     @decorators.idempotent_id('a7644c63-2669-467a-b00e-452dd5c5397b')
     def test_update_default_quota(self):
+        """Test updating default volume quota class set
+
+        Check that the default quotas of both the current project and a new
+        project are updated to the provided values.
+        """
         LOG.debug("Get the current default quota class values")
         body = self.admin_quota_classes_client.show_quota_class_set(
             'default')['quota_class_set']
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index ecc850e..98ae83b 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -23,17 +23,18 @@
 
 
 class VolumeTypesTest(base.BaseVolumeAdminTest):
+    """Test volume types"""
 
     @decorators.idempotent_id('9d9b28e3-1b2e-4483-a2cc-24aa0ea1de54')
     def test_volume_type_list(self):
-        # List volume types.
+        """Test listing volume types"""
         body = \
             self.admin_volume_types_client.list_volume_types()['volume_types']
         self.assertIsInstance(body, list)
 
     @decorators.idempotent_id('c03cc62c-f4e9-4623-91ec-64ce2f9c1260')
     def test_volume_crud_with_volume_type_and_extra_specs(self):
-        # Create/update/get/delete volume with volume_type and extra spec.
+        """Test create/update/get/delete volume with volume_type"""
         volume_types = list()
         vol_name = data_utils.rand_name(self.__class__.__name__ + '-volume')
         proto = CONF.volume.storage_protocol
@@ -80,7 +81,7 @@
 
     @decorators.idempotent_id('4e955c3b-49db-4515-9590-0c99f8e471ad')
     def test_volume_type_create_get_delete(self):
-        # Create/get volume type.
+        """Test create/get/delete volume type"""
         name = data_utils.rand_name(self.__class__.__name__ + '-volume-type')
         description = data_utils.rand_name("volume-type-description")
         proto = CONF.volume.storage_protocol
@@ -118,8 +119,10 @@
 
     @decorators.idempotent_id('7830abd0-ff99-4793-a265-405684a54d46')
     def test_volume_type_encryption_create_get_update_delete(self):
-        # Create/get/update/delete encryption type.
+        """Test create/get/update/delete volume encryption type"""
         create_kwargs = {'provider': 'LuksEncryptor',
+                         'key_size': 256,
+                         'cipher': 'aes-xts-plain64',
                          'control_location': 'front-end'}
         volume_type_id = self.create_volume_type()['id']
 
@@ -175,6 +178,7 @@
 
     @decorators.idempotent_id('cf9f07c6-db9e-4462-a243-5933ad65e9c8')
     def test_volume_type_update(self):
+        """Test updating volume type details"""
         # Create volume type
         volume_type = self.create_volume_type()
 
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index fe249d6..6b2a278 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -20,6 +20,7 @@
 
 
 class ExtraSpecsNegativeTest(base.BaseVolumeAdminTest):
+    """Negative tests of volume type extra specs"""
 
     @classmethod
     def resource_setup(cls):
@@ -30,7 +31,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('08961d20-5cbb-4910-ac0f-89ad6dbb2da1')
     def test_update_no_body(self):
-        # Should not update volume type extra specs with no body
+        """Test updating volume type extra specs with no body should fail"""
         self.assertRaises(
             lib_exc.BadRequest,
             self.admin_volume_types_client.update_volume_type_extra_specs,
@@ -39,7 +40,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('25e5a0ee-89b3-4c53-8310-236f76c75365')
     def test_update_nonexistent_extra_spec_id(self):
-        # Should not update volume type extra specs with nonexistent id.
+        """Test updating volume type extra specs with non existent name
+
+        Updating volume type extra specs with non existent extra spec name
+        should fail.
+        """
         extra_spec = {"spec1": "val2"}
         self.assertRaises(
             lib_exc.BadRequest,
@@ -50,7 +55,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9bf7a657-b011-4aec-866d-81c496fbe5c8')
     def test_update_none_extra_spec_id(self):
-        # Should not update volume type extra specs with none id.
+        """Test updating volume type extra specs without name
+
+        Updating volume type extra specs without extra spec name should fail.
+        """
         extra_spec = {"spec1": "val2"}
         self.assertRaises(
             lib_exc.BadRequest,
@@ -60,8 +68,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a77dfda2-9100-448e-9076-ed1711f4bdfc')
     def test_update_multiple_extra_spec(self):
-        # Should not update volume type extra specs with multiple specs as
-        # body.
+        """Test updating multiple volume type extra specs should fail"""
         extra_spec = {"spec1": "val2", "spec2": "val1"}
         self.assertRaises(
             lib_exc.BadRequest,
@@ -72,8 +79,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('49d5472c-a53d-4eab-a4d3-450c4db1c545')
     def test_create_nonexistent_type_id(self):
-        # Should not create volume type extra spec for nonexistent volume
-        # type id.
+        """Test creating volume type extra specs for non existent volume type
+
+        Creating volume type extra specs for non existent volume type should
+        fail.
+        """
         extra_specs = {"spec2": "val1"}
         self.assertRaises(
             lib_exc.NotFound,
@@ -83,7 +93,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c821bdc8-43a4-4bf4-86c8-82f3858d5f7d')
     def test_create_none_body(self):
-        # Should not create volume type extra spec for none POST body.
+        """Test creating volume type extra spec with none POST body
+
+        Creating volume type extra spec with none POST body should fail.
+        """
         self.assertRaises(
             lib_exc.BadRequest,
             self.admin_volume_types_client.create_volume_type_extra_specs,
@@ -92,7 +105,10 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('bc772c71-1ed4-4716-b945-8b5ed0f15e87')
     def test_create_invalid_body(self):
-        # Should not create volume type extra spec for invalid POST body.
+        """Test creating volume type extra spec with invalid POST body
+
+        Creating volume type extra spec with invalid POST body should fail.
+        """
         self.assertRaises(
             lib_exc.BadRequest,
             self.admin_volume_types_client.create_volume_type_extra_specs,
@@ -101,8 +117,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('031cda8b-7d23-4246-8bf6-bbe73fd67074')
     def test_delete_nonexistent_volume_type_id(self):
-        # Should not delete volume type extra spec for nonexistent
-        # type id.
+        """Test deleting volume type extra spec for non existent volume type
+
+        Deleting volume type extra spec for non existent volume type should
+        fail.
+        """
         self.assertRaises(
             lib_exc.NotFound,
             self.admin_volume_types_client.delete_volume_type_extra_specs,
@@ -111,7 +130,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('dee5cf0c-cdd6-4353-b70c-e847050d71fb')
     def test_list_nonexistent_volume_type_id(self):
-        # Should not list volume type extra spec for nonexistent type id.
+        """Test listing volume type extra spec for non existent volume type
+
+        Listing volume type extra spec for non existent volume type should
+        fail.
+        """
         self.assertRaises(
             lib_exc.NotFound,
             self.admin_volume_types_client.list_volume_types_extra_specs,
@@ -120,7 +143,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('9f402cbd-1838-4eb4-9554-126a6b1908c9')
     def test_get_nonexistent_volume_type_id(self):
-        # Should not get volume type extra spec for nonexistent type id.
+        """Test getting volume type extra spec for non existent volume type
+
+        Getting volume type extra spec for non existent volume type should
+        fail.
+        """
         self.assertRaises(
             lib_exc.NotFound,
             self.admin_volume_types_client.show_volume_type_extra_specs,
@@ -129,8 +156,11 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('c881797d-12ff-4f1a-b09d-9f6212159753')
     def test_get_nonexistent_extra_spec_name(self):
-        # Should not get volume type extra spec for nonexistent extra spec
-        # name.
+        """Test getting volume type extra spec for non existent spec name
+
+        Getting volume type extra spec for non existent extra spec name should
+        fail.
+        """
         self.assertRaises(
             lib_exc.NotFound,
             self.admin_volume_types_client.show_volume_type_extra_specs,
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index bd4b3fa..835cc1d 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -26,6 +26,7 @@
 
 
 class VolumesBackupsAdminTest(base.BaseVolumeAdminTest):
+    """Test volume backups"""
 
     @classmethod
     def skip_checks(cls):
@@ -118,6 +119,7 @@
 
     @decorators.idempotent_id('47a35425-a891-4e13-961c-c45deea21e94')
     def test_volume_backup_reset_status(self):
+        """Test resetting volume backup status to error"""
         # Create a volume
         volume = self.create_volume()
         # Create a backup
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 7af5927..d5c6fd9 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -20,7 +20,6 @@
 from tempest.lib.common import api_version_utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
-from tempest.lib import exceptions
 import tempest.test
 
 CONF = config.CONF
@@ -33,12 +32,6 @@
     # Set this to True in subclasses to create a default network. See
     # https://bugs.launchpad.net/tempest/+bug/1844568
     create_default_network = False
-    _api_version = 2
-    # if api_v2 is not enabled while api_v3 is enabled, the volume v2 classes
-    # should be transferred to volume v3 classes.
-    if (not CONF.volume_feature_enabled.api_v2 and
-        CONF.volume_feature_enabled.api_v3):
-        _api_version = 3
     credentials = ['primary']
 
     @classmethod
@@ -48,17 +41,6 @@
         if not CONF.service_available.cinder:
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
-        if cls._api_version == 2:
-            if not CONF.volume_feature_enabled.api_v2:
-                msg = "Volume API v2 is disabled"
-                raise cls.skipException(msg)
-        elif cls._api_version == 3:
-            if not CONF.volume_feature_enabled.api_v3:
-                msg = "Volume API v3 is disabled"
-                raise cls.skipException(msg)
-        else:
-            msg = ("Invalid Cinder API version (%s)" % cls._api_version)
-            raise exceptions.InvalidConfiguration(msg)
 
         api_version_utils.check_skip_with_microversion(
             cls.min_microversion, cls.max_microversion,
@@ -129,6 +111,13 @@
             name = data_utils.rand_name(cls.__name__ + '-Volume')
             kwargs['name'] = name
 
+        if CONF.volume.volume_type and 'volume_type' not in kwargs:
+            # If volume_type is not provided in the config, there is no
+            # need to add a volume type to the request. If volume_type has
+            # already been added by a child class, there is no need to
+            # override it either.
+            kwargs['volume_type'] = CONF.volume.volume_type
+
         if CONF.compute.compute_volume_common_az:
             kwargs.setdefault('availability_zone',
                               CONF.compute.compute_volume_common_az)
@@ -166,6 +155,10 @@
 
         backup = backup_client.create_backup(
             volume_id=volume_id, **kwargs)['backup']
+        # addCleanup runs cleanups in LIFO order, so the wait for resource
+        # deletion must be registered before the backup delete itself.
+        self.addCleanup(backup_client.wait_for_resource_deletion,
+                        backup['id'])
         self.addCleanup(backup_client.delete_backup, backup['id'])
         waiters.wait_for_volume_resource_status(backup_client, backup['id'],
                                                 'available')
@@ -302,6 +295,27 @@
         cls.addClassResourceCleanup(cls.clear_volume_type, volume_type['id'])
         return volume_type
 
+    def create_encryption_type(self, type_id=None, provider=None,
+                               key_size=None, cipher=None,
+                               control_location=None):
+        if not type_id:
+            volume_type = self.create_volume_type()
+            type_id = volume_type['id']
+        self.admin_encryption_types_client.create_encryption_type(
+            type_id, provider=provider, key_size=key_size, cipher=cipher,
+            control_location=control_location)
+
+    def create_encrypted_volume(self, encryption_provider, key_size=256,
+                                cipher='aes-xts-plain64',
+                                control_location='front-end'):
+        volume_type = self.create_volume_type()
+        self.create_encryption_type(type_id=volume_type['id'],
+                                    provider=encryption_provider,
+                                    key_size=key_size,
+                                    cipher=cipher,
+                                    control_location=control_location)
+        return self.create_volume(volume_type=volume_type['name'])
+
     def create_group_type(self, name=None, **kwargs):
         """Create a test group-type"""
         name = name or data_utils.rand_name(
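A small self-contained sketch of the cleanup ordering that the new comment in create_backup() above relies on: addCleanup callbacks run in LIFO order, so the wait for deletion is registered first and therefore runs after the delete. The class below is illustrative only, not part of the patch.

    import unittest


    class CleanupOrderSketch(unittest.TestCase):
        def test_cleanups_run_in_lifo_order(self):
            calls = []
            # Registered first, runs last: mirrors wait_for_resource_deletion.
            self.addCleanup(calls.append, 'wait_for_resource_deletion')
            # Registered second, runs first: mirrors delete_backup.
            self.addCleanup(calls.append, 'delete_backup')

            self.doCleanups()
            self.assertEqual(['delete_backup', 'wait_for_resource_deletion'],
                             calls)


    if __name__ == '__main__':
        unittest.main()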
diff --git a/tempest/api/volume/test_versions.py b/tempest/api/volume/test_versions.py
index e065bdf..578be58 100644
--- a/tempest/api/volume/test_versions.py
+++ b/tempest/api/volume/test_versions.py
@@ -19,8 +19,6 @@
 class VersionsTest(base.BaseVolumeTest):
     """Test volume versions"""
 
-    _api_version = 3
-
     @decorators.idempotent_id('77838fc4-b49b-4c64-9533-166762517369')
     @decorators.attr(type='smoke')
     def test_list_versions(self):
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 3eb81f5..9600aa9 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -104,3 +104,30 @@
         self.client.delete_volume_transfer(transfer_id)
         waiters.wait_for_volume_resource_status(
             self.volumes_client, volume['id'], 'available')
+
+
+class VolumesTransfersV355Test(VolumesTransfersTest):
+    """Test volume transfer for the "new" Transfers API mv 3.55"""
+
+    min_microversion = '3.55'
+    max_microversion = 'latest'
+
+    credentials = ['primary', 'alt', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(VolumesTransfersV355Test, cls).setup_clients()
+        cls.client = cls.os_primary.volume_transfers_mv355_client_latest
+        cls.alt_client = cls.os_alt.volume_transfers_mv355_client_latest
+
+    @decorators.idempotent_id('9f36bb2b-619f-4507-b246-76aeb9a28851')
+    def test_create_get_list_accept_volume_transfer(self):
+        """Test create, get, list, accept with volume-transfers API mv 3.55"""
+        super(VolumesTransfersV355Test, self). \
+            test_create_get_list_accept_volume_transfer()
+
+    @decorators.idempotent_id('af4a5b97-0859-4f31-aa3c-85b05bb63322')
+    def test_create_list_delete_volume_transfer(self):
+        """Test create, list, delete with volume-transfers API mv 3.55"""
+        super(VolumesTransfersV355Test, self). \
+            test_create_list_delete_volume_transfer()
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index 2e78114..fff6a44 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -164,7 +164,6 @@
 class VolumesBackupsV39Test(base.BaseVolumeTest):
     """Test volumes backup with volume microversion greater than 3.8"""
 
-    _api_version = 3
     min_microversion = '3.9'
     max_microversion = 'latest'
 
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 041823d..d9790f3 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -61,7 +61,7 @@
         self.assertEqual(extend_size, resized_volume['size'])
 
 
-class VolumesExtendAttachedTest(base.BaseVolumeTest):
+class BaseVolumesExtendAttachedTest(base.BaseVolumeTest):
     """Tests extending the size of an attached volume."""
     create_default_network = True
 
@@ -77,7 +77,6 @@
     # details once that microversion is available in Nova.
     credentials = ['primary', 'admin']
 
-    _api_version = 3
     # NOTE(mriedem): The minimum required volume API version is 3.42 and the
     # minimum required compute API microversion is 2.51, but the compute call
     # is implicit - Cinder calls Nova at that microversion, Tempest does not.
@@ -100,14 +99,9 @@
                     event['finish_time']):
                 return event
 
-    @decorators.idempotent_id('301f5a30-1c6f-4ea0-be1a-91fd28d44354')
-    @testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
-                          "Attached volume extend is disabled.")
-    @utils.services('compute')
-    def test_extend_attached_volume(self):
+    def _test_extend_attached_volume(self, volume):
         """This is a happy path test which does the following:
 
-        * Create a volume at the configured volume_size.
         * Create a server instance.
         * Attach the volume to the server.
         * Wait for the volume status to be "in-use".
@@ -119,8 +113,6 @@
           if we timeout waiting for the instance action event to show up, or
           if the action on the server fails.
         """
-        # Create a test volume. Will be automatically cleaned up on teardown.
-        volume = self.create_volume()
         # Create a test server. Will be automatically cleaned up on teardown.
         server = self.create_server()
         # Attach the volume to the server and wait for the volume status to be
@@ -182,3 +174,14 @@
             "%(request_id)s." %
             {'result': event['result'],
              'request_id': action['request_id']})
+
+
+class VolumesExtendAttachedTest(BaseVolumesExtendAttachedTest):
+
+    @decorators.idempotent_id('301f5a30-1c6f-4ea0-be1a-91fd28d44354')
+    @testtools.skipUnless(CONF.volume_feature_enabled.extend_attached_volume,
+                          "Attached volume extend is disabled.")
+    @utils.services('compute')
+    def test_extend_attached_volume(self):
+        volume = self.create_volume()
+        self._test_extend_attached_volume(volume)
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index 91728ab..28e41bf 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -143,7 +143,6 @@
 class VolumesSummaryTest(base.BaseVolumeTest):
     """Test volume summary"""
 
-    _api_version = 3
     min_microversion = '3.12'
     max_microversion = 'latest'
 
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 60f85a4..1d1981c 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -16,8 +16,8 @@
 
 import operator
 import random
+from urllib.parse import urlparse
 
-from six.moves.urllib.parse import urlparse
 from testtools import matchers
 
 from tempest.api.volume import base
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 76c22f0..35dd0ca 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -13,7 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.api.volume import base
 from tempest.common import utils
@@ -50,7 +50,7 @@
                         self.images_client.delete_image, image['id'])
 
         # Upload image with 1KB data
-        image_file = six.BytesIO(data_utils.random_bytes())
+        image_file = io.BytesIO(data_utils.random_bytes())
         self.images_client.store_image_file(image['id'], image_file)
         waiters.wait_for_image_status(self.images_client,
                                       image['id'], 'active')
@@ -127,14 +127,14 @@
     def test_update_volume_with_nonexistent_volume_id(self):
         """Test updating non existent volume should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
-                          volume_id=data_utils.rand_uuid())
+                          volume_id=data_utils.rand_uuid(), name="n")
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e66e40d6-65e6-4e75-bdc7-636792fa152d')
     def test_update_volume_with_invalid_volume_id(self):
         """Test updating volume with invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
-                          volume_id=data_utils.rand_name('invalid'))
+                          volume_id=data_utils.rand_name('invalid'), name="n")
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('72aeca85-57a5-4c1f-9057-f320f9ea575b')
diff --git a/tempest/clients.py b/tempest/clients.py
index 1db93a0..6080f01 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -44,7 +44,7 @@
         self._set_object_storage_clients()
         self._set_image_clients()
         self._set_network_clients()
-        self.placement_client = self.placement.PlacementClient()
+        self._set_placement_clients()
         # TODO(andreaf) This is maintained for backward compatibility
         # with plugins, but it should removed eventually, since it was
         # never a stable interface and it's not useful anyways
@@ -72,6 +72,7 @@
         self.qos_client = self.network.QosClient()
         self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
         self.segments_client = self.network.SegmentsClient()
+        self.trunks_client = self.network.TrunksClient()
 
     def _set_image_clients(self):
         if CONF.service_available.glance:
@@ -88,6 +89,14 @@
                 self.image_v2.NamespacePropertiesClient()
             self.namespace_tags_client = self.image_v2.NamespaceTagsClient()
             self.image_versions_client = self.image_v2.VersionsClient()
+            # NOTE(danms): If no alternate endpoint is configured,
+            # this client will work the same as the base self.images_client.
+            # If your test needs to know if these are different, check the
+            # config option to see if the alternate_image_endpoint is set.
+            self.image_client_remote = self.image_v2.ImagesClient(
+                service=CONF.image.alternate_image_endpoint,
+                endpoint_type=CONF.image.alternate_image_endpoint_type,
+                region=CONF.image.region)
 
     def _set_compute_clients(self):
         self.agents_client = self.compute.AgentsClient()
@@ -125,6 +134,8 @@
         self.instance_usages_audit_log_client = (
             self.compute.InstanceUsagesAuditLogClient())
         self.tenant_networks_client = self.compute.TenantNetworksClient()
+        self.assisted_volume_snapshots_client = (
+            self.compute.AssistedVolumeSnapshotsClient())
 
         # NOTE: The following client needs special timeout values because
         # the API is a proxy for the other component.
@@ -139,6 +150,11 @@
         self.snapshots_extensions_client = self.compute.SnapshotsClient(
             **params_volume)
 
+    def _set_placement_clients(self):
+        self.placement_client = self.placement.PlacementClient()
+        self.resource_providers_client = \
+            self.placement.ResourceProvidersClient()
+
     def _set_identity_clients(self):
         # Clients below use the admin endpoint type of Keystone API v2
         params_v2_admin = {
@@ -203,6 +219,8 @@
             **params_v3)
         self.application_credentials_client = \
             self.identity_v3.ApplicationCredentialsClient(**params_v3)
+        self.access_rules_client = \
+            self.identity_v3.AccessRulesClient(**params_v3)
 
         # Token clients do not use the catalog. They only need default_params.
         # They read auth_url, so they should only be set if the corresponding
@@ -224,89 +242,87 @@
 
     def _set_volume_clients(self):
 
-        # if only api_v3 is enabled, all these clients should be available
-        if (CONF.volume_feature_enabled.api_v2 or
-            CONF.volume_feature_enabled.api_v3):
-            self.backups_client_latest = self.volume_v3.BackupsClient()
-            self.encryption_types_client_latest = \
-                self.volume_v3.EncryptionTypesClient()
-            self.snapshot_manage_client_latest = \
-                self.volume_v3.SnapshotManageClient()
-            self.snapshots_client_latest = self.volume_v3.SnapshotsClient()
-            self.volume_capabilities_client_latest = \
-                self.volume_v3.CapabilitiesClient()
-            self.volume_manage_client_latest = (
-                self.volume_v3.VolumeManageClient())
-            self.volume_qos_client_latest = self.volume_v3.QosSpecsClient()
-            self.volume_services_client_latest = (
-                self.volume_v3.ServicesClient())
-            self.volume_types_client_latest = self.volume_v3.TypesClient()
-            self.volume_hosts_client_latest = self.volume_v3.HostsClient()
-            self.volume_quotas_client_latest = self.volume_v3.QuotasClient()
-            self.volume_quota_classes_client_latest = \
-                self.volume_v3.QuotaClassesClient()
-            self.volume_scheduler_stats_client_latest = \
-                self.volume_v3.SchedulerStatsClient()
-            self.volume_transfers_client_latest = \
-                self.volume_v3.TransfersClient()
-            self.volume_availability_zone_client_latest = \
-                self.volume_v3.AvailabilityZoneClient()
-            self.volume_limits_client_latest = self.volume_v3.LimitsClient()
-            self.volumes_client_latest = self.volume_v3.VolumesClient()
-            self.volumes_extension_client_latest = \
-                self.volume_v3.ExtensionsClient()
-            self.group_types_client_latest = self.volume_v3.GroupTypesClient()
-            self.groups_client_latest = self.volume_v3.GroupsClient()
-            self.group_snapshots_client_latest = \
-                self.volume_v3.GroupSnapshotsClient()
-            self.volume_messages_client_latest = (
-                self.volume_v3.MessagesClient())
-            self.volume_versions_client_latest = (
-                self.volume_v3.VersionsClient())
-            self.attachments_client_latest = (
-                self.volume_v3.AttachmentsClient())
+        self.backups_client_latest = self.volume_v3.BackupsClient()
+        self.encryption_types_client_latest = \
+            self.volume_v3.EncryptionTypesClient()
+        self.snapshot_manage_client_latest = \
+            self.volume_v3.SnapshotManageClient()
+        self.snapshots_client_latest = self.volume_v3.SnapshotsClient()
+        self.volume_capabilities_client_latest = \
+            self.volume_v3.CapabilitiesClient()
+        self.volume_manage_client_latest = (
+            self.volume_v3.VolumeManageClient())
+        self.volume_qos_client_latest = self.volume_v3.QosSpecsClient()
+        self.volume_services_client_latest = (
+            self.volume_v3.ServicesClient())
+        self.volume_types_client_latest = self.volume_v3.TypesClient()
+        self.volume_hosts_client_latest = self.volume_v3.HostsClient()
+        self.volume_quotas_client_latest = self.volume_v3.QuotasClient()
+        self.volume_quota_classes_client_latest = \
+            self.volume_v3.QuotaClassesClient()
+        self.volume_scheduler_stats_client_latest = \
+            self.volume_v3.SchedulerStatsClient()
+        self.volume_transfers_client_latest = \
+            self.volume_v3.TransfersClient()
+        self.volume_transfers_mv355_client_latest = \
+            self.volume_v3.TransfersV355Client()
+        self.volume_availability_zone_client_latest = \
+            self.volume_v3.AvailabilityZoneClient()
+        self.volume_limits_client_latest = self.volume_v3.LimitsClient()
+        self.volumes_client_latest = self.volume_v3.VolumesClient()
+        self.volumes_extension_client_latest = \
+            self.volume_v3.ExtensionsClient()
+        self.group_types_client_latest = self.volume_v3.GroupTypesClient()
+        self.groups_client_latest = self.volume_v3.GroupsClient()
+        self.group_snapshots_client_latest = \
+            self.volume_v3.GroupSnapshotsClient()
+        self.volume_messages_client_latest = (
+            self.volume_v3.MessagesClient())
+        self.volume_versions_client_latest = (
+            self.volume_v3.VersionsClient())
+        self.attachments_client_latest = (
+            self.volume_v3.AttachmentsClient())
 
-            # TODO(gmann): Below alias for service clients have been
-            # deprecated and will be removed in future. Start using the alias
-            # defined above with suffix _latest.
-            # ****************Deprecated alias start from here***************
-            self.backups_v2_client = self.volume_v3.BackupsClient()
-            self.encryption_types_v2_client = \
-                self.volume_v3.EncryptionTypesClient()
-            self.snapshot_manage_v2_client = \
-                self.volume_v3.SnapshotManageClient()
-            self.snapshots_v2_client = self.volume_v3.SnapshotsClient()
-            self.volume_capabilities_v2_client = \
-                self.volume_v3.CapabilitiesClient()
-            self.volume_manage_v2_client = self.volume_v3.VolumeManageClient()
-            self.volume_qos_v2_client = self.volume_v3.QosSpecsClient()
-            self.volume_services_v2_client = self.volume_v3.ServicesClient()
-            self.volume_types_v2_client = self.volume_v3.TypesClient()
-            self.volume_hosts_v2_client = self.volume_v3.HostsClient()
-            self.volume_quotas_v2_client = self.volume_v3.QuotasClient()
-            self.volume_quota_classes_v2_client = \
-                self.volume_v3.QuotaClassesClient()
-            self.volume_scheduler_stats_v2_client = \
-                self.volume_v3.SchedulerStatsClient()
-            self.volume_transfers_v2_client = self.volume_v3.TransfersClient()
-            self.volume_v2_availability_zone_client = \
-                self.volume_v3.AvailabilityZoneClient()
-            self.volume_v2_limits_client = self.volume_v3.LimitsClient()
-            self.volumes_v2_client = self.volume_v3.VolumesClient()
-            self.volumes_v2_extension_client = \
-                self.volume_v3.ExtensionsClient()
+        # TODO(gmann): Below alias for service clients have been
+        # deprecated and will be removed in future. Start using the alias
+        # defined above with suffix _latest.
+        # ****************Deprecated alias start from here***************
+        self.backups_v2_client = self.volume_v3.BackupsClient()
+        self.encryption_types_v2_client = \
+            self.volume_v3.EncryptionTypesClient()
+        self.snapshot_manage_v2_client = \
+            self.volume_v3.SnapshotManageClient()
+        self.snapshots_v2_client = self.volume_v3.SnapshotsClient()
+        self.volume_capabilities_v2_client = \
+            self.volume_v3.CapabilitiesClient()
+        self.volume_manage_v2_client = self.volume_v3.VolumeManageClient()
+        self.volume_qos_v2_client = self.volume_v3.QosSpecsClient()
+        self.volume_services_v2_client = self.volume_v3.ServicesClient()
+        self.volume_types_v2_client = self.volume_v3.TypesClient()
+        self.volume_hosts_v2_client = self.volume_v3.HostsClient()
+        self.volume_quotas_v2_client = self.volume_v3.QuotasClient()
+        self.volume_quota_classes_v2_client = \
+            self.volume_v3.QuotaClassesClient()
+        self.volume_scheduler_stats_v2_client = \
+            self.volume_v3.SchedulerStatsClient()
+        self.volume_transfers_v2_client = self.volume_v3.TransfersClient()
+        self.volume_v2_availability_zone_client = \
+            self.volume_v3.AvailabilityZoneClient()
+        self.volume_v2_limits_client = self.volume_v3.LimitsClient()
+        self.volumes_v2_client = self.volume_v3.VolumesClient()
+        self.volumes_v2_extension_client = \
+            self.volume_v3.ExtensionsClient()
 
-        if CONF.volume_feature_enabled.api_v3:
-            self.backups_v3_client = self.volume_v3.BackupsClient()
-            self.group_types_v3_client = self.volume_v3.GroupTypesClient()
-            self.groups_v3_client = self.volume_v3.GroupsClient()
-            self.group_snapshots_v3_client = \
-                self.volume_v3.GroupSnapshotsClient()
-            self.snapshots_v3_client = self.volume_v3.SnapshotsClient()
-            self.volume_v3_messages_client = self.volume_v3.MessagesClient()
-            self.volume_v3_versions_client = self.volume_v3.VersionsClient()
-            self.volumes_v3_client = self.volume_v3.VolumesClient()
-            # ****************Deprecated alias end here***********************
+        self.backups_v3_client = self.volume_v3.BackupsClient()
+        self.group_types_v3_client = self.volume_v3.GroupTypesClient()
+        self.groups_v3_client = self.volume_v3.GroupsClient()
+        self.group_snapshots_v3_client = \
+            self.volume_v3.GroupSnapshotsClient()
+        self.snapshots_v3_client = self.volume_v3.SnapshotsClient()
+        self.volume_v3_messages_client = self.volume_v3.MessagesClient()
+        self.volume_v3_versions_client = self.volume_v3.VersionsClient()
+        self.volumes_v3_client = self.volume_v3.VolumesClient()
+        # ****************Deprecated alias end here***********************
 
     def _set_object_storage_clients(self):
         self.account_client = self.object_storage.AccountClient()
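
Besides flattening the volume client setup, this hunk registers several new
clients (network trunks, assisted volume snapshots, identity access rules,
placement resource providers) and a second image client. Per the NOTE above,
``image_client_remote`` only points at a distinct service when
``[image]/alternate_image_endpoint`` is set; a minimal sketch of how a test
might use it (the test class and attribute names are illustrative)::

    from tempest import config

    CONF = config.CONF


    class ImageCopyTest(base.BaseV2ImageTest):   # illustrative test class

        @classmethod
        def setup_clients(cls):
            super(ImageCopyTest, cls).setup_clients()
            # Same service as images_client unless the alternate endpoint
            # option is configured.
            cls.remote_client = cls.os_primary.image_client_remote

        def _endpoints_differ(self):
            return CONF.image.alternate_image_endpoint is not None
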
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index ff552a1..917262e 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -270,7 +270,7 @@
                 config.CONF.set_config_path(parsed_args.config_file)
             setup_logging()
             resources = []
-            for count in range(parsed_args.concurrency):
+            for _ in range(parsed_args.concurrency):
                 # Use N different cred_providers to obtain different
                 # sets of creds
                 cred_provider = get_credential_provider(parsed_args)
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 84d2492..f2370f3 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_log import log as logging
-from six.moves.urllib import parse as urllib
 
 from tempest import clients
 from tempest.common import credentials_factory as credentials
diff --git a/tempest/cmd/init.py b/tempest/cmd/init.py
index d84f3a3..3eae552 100644
--- a/tempest/cmd/init.py
+++ b/tempest/cmd/init.py
@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import configparser
 import os
 import shutil
 import sys
@@ -19,7 +20,6 @@
 from cliff import command
 from oslo_config import generator
 from oslo_log import log as logging
-from six import moves
 from stestr import commands
 
 from tempest.cmd import workspace
@@ -44,11 +44,14 @@
 
     :return: default config dir
     """
+    # NOTE: The global default directory (/etc/tempest) is Linux-specific.
     global_conf_dir = '/etc/tempest'
     xdg_config = os.environ.get('XDG_CONFIG_HOME',
-                                os.path.expanduser('~/.config'))
+                                os.path.expanduser(os.path.join('~',
+                                                                '.config')))
     user_xdg_global_path = os.path.join(xdg_config, 'tempest')
-    user_global_path = os.path.join(os.path.expanduser('~'), '.tempest/etc')
+    user_global_path = os.path.join(os.path.expanduser('~'),
+                                    '.tempest', 'etc')
     if os.path.isdir(global_conf_dir):
         return global_conf_dir
     elif os.path.isdir(user_xdg_global_path):
@@ -89,7 +92,7 @@
             stestr_conf_file.write(stestr_conf)
 
     def get_configparser(self, conf_path):
-        config_parse = moves.configparser.ConfigParser()
+        config_parse = configparser.ConfigParser()
         config_parse.optionxform = str
         # get any existing values if a config file already exists
         if os.path.isfile(conf_path):
@@ -121,7 +124,7 @@
     def generate_sample_config(self, local_dir):
         conf_generator = os.path.join(os.path.dirname(__file__),
                                       'config-generator.tempest.conf')
-        output_file = os.path.join(local_dir, 'etc/tempest.conf.sample')
+        output_file = os.path.join(local_dir, 'etc', 'tempest.conf.sample')
         if os.path.isfile(conf_generator):
             generator.main(['--config-file', conf_generator, '--output-file',
                             output_file])
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index d82b6df..2669ff7 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -22,10 +22,10 @@
 * ``--regex/-r``: This is a selection regex like what stestr uses. It will run
   any tests that match on re.match() with the regex
 * ``--smoke/-s``: Run all the tests tagged as smoke
-* ``--black-regex``: It allows to do simple test exclusion via passing a
-  rejection/black regexp
+* ``--exclude-regex``: It allows to do simple test exclusion via passing a
+  rejection/exclude regexp
 
-There are also the ``--blacklist-file`` and ``--whitelist-file`` options that
+There are also the ``--exclude-list`` and ``--include-list`` options that
 let you pass a filepath to tempest run with the file format being a line
 separated regex, with '#' used to signify the start of a comment on a line.
 For example::
@@ -128,8 +128,8 @@
 import sys
 
 from cliff import command
+from oslo_log import log
 from oslo_serialization import jsonutils as json
-import six
 from stestr import commands
 
 from tempest import clients
@@ -139,13 +139,11 @@
 from tempest.common import credentials_factory as credentials
 from tempest import config
 
-if six.PY2:
-    # Python 2 has not FileNotFoundError exception
-    FileNotFoundError = IOError
-
 CONF = config.CONF
 SAVED_STATE_JSON = "saved_state.json"
 
+LOG = log.getLogger(__name__)
+
 
 class TempestRun(command.Command):
 
@@ -167,7 +165,7 @@
         # environment variable and fall back to "python", under python3
         # if it does not exist. we should set it to the python3 executable
         # to deal with this situation better for now.
-        if six.PY3 and 'PYTHON' not in os.environ:
+        if 'PYTHON' not in os.environ:
             os.environ['PYTHON'] = sys.executable
 
     def _create_stestr_conf(self):
@@ -206,23 +204,71 @@
             self._init_state()
 
         regex = self._build_regex(parsed_args)
+
+        # temporary method for parsing deprecated and new stestr options
+        # and showing warning messages in order to make the transition
+        # smoother for all tempest consumers
+        # TODO(kopecmartin) remove this after stestr>=3.1.0 is used
+        # in all supported OpenStack releases
+        def parse_dep(old_o, old_v, new_o, new_v):
+            ret = ''
+            if old_v:
+                LOG.warning("'%s' option is deprecated, use '%s' instead "
+                            "which is functionally equivalent. Right now "
+                            "Tempest still supports this option for "
+                            "backward compatibility, however, it will be "
+                            "removed soon.",
+                            old_o, new_o)
+                ret = old_v
+            if old_v and new_v:
+                # both options are specified
+                LOG.warning("'%s' and '%s' are specified at the same time, "
+                            "'%s' takes precedence over '%s'",
+                            new_o, old_o, new_o, old_o)
+            if new_v:
+                ret = new_v
+            return ret
+        ex_regex = parse_dep('--black-regex', parsed_args.black_regex,
+                             '--exclude-regex', parsed_args.exclude_regex)
+        ex_list = parse_dep('--blacklist-file', parsed_args.blacklist_file,
+                            '--exclude-list', parsed_args.exclude_list)
+        in_list = parse_dep('--whitelist-file', parsed_args.whitelist_file,
+                            '--include-list', parsed_args.include_list)
+
         return_code = 0
         if parsed_args.list_tests:
-            return_code = commands.list_command(
-                filters=regex, whitelist_file=parsed_args.whitelist_file,
-                blacklist_file=parsed_args.blacklist_file,
-                black_regex=parsed_args.black_regex)
+            try:
+                return_code = commands.list_command(
+                    filters=regex, include_list=in_list,
+                    exclude_list=ex_list, exclude_regex=ex_regex)
+            except TypeError:
+                # exclude_list, include_list and exclude_regex are defined only
+                # in stestr >= 3.1.0; this except block catches the case when
+                # tempest is executed with an older stestr
+                return_code = commands.list_command(
+                    filters=regex, whitelist_file=in_list,
+                    blacklist_file=ex_list, black_regex=ex_regex)
 
         else:
             serial = not parsed_args.parallel
-            return_code = commands.run_command(
-                filters=regex, subunit_out=parsed_args.subunit,
-                serial=serial, concurrency=parsed_args.concurrency,
-                blacklist_file=parsed_args.blacklist_file,
-                whitelist_file=parsed_args.whitelist_file,
-                black_regex=parsed_args.black_regex,
-                worker_path=parsed_args.worker_file,
-                load_list=parsed_args.load_list, combine=parsed_args.combine)
+            params = {
+                'filters': regex, 'subunit_out': parsed_args.subunit,
+                'serial': serial, 'concurrency': parsed_args.concurrency,
+                'worker_path': parsed_args.worker_file,
+                'load_list': parsed_args.load_list,
+                'combine': parsed_args.combine
+            }
+            try:
+                return_code = commands.run_command(
+                    **params, exclude_list=ex_list,
+                    include_list=in_list, exclude_regex=ex_regex)
+            except TypeError:
+                # exclude_list, include_list and exclude_regex are defined only
+                # in stestr >= 3.1.0; this except block catches the case when
+                # tempest is executed with an older stestr
+                return_code = commands.run_command(
+                    **params, blacklist_file=ex_list,
+                    whitelist_file=in_list, black_regex=ex_regex)
             if return_code > 0:
                 sys.exit(return_code)
         return return_code
@@ -276,15 +322,38 @@
                            help='A normal stestr selection regex used to '
                                 'specify a subset of tests to run')
         parser.add_argument('--black-regex', dest='black_regex',
+                            help='DEPRECATED: This option is deprecated and '
+                                 'will be removed soon; use --exclude-regex '
+                                 'which is functionally equivalent. If this '
+                                 'is specified at the same time as '
+                                 '--exclude-regex, this flag will be ignored '
+                                 'and --exclude-regex will be used')
+        parser.add_argument('--exclude-regex', dest='exclude_regex',
                             help='A regex to exclude tests that match it')
         parser.add_argument('--whitelist-file', '--whitelist_file',
-                            help="Path to a whitelist file, this file "
-                            "contains a separate regex on each "
-                            "newline.")
+                            help='DEPRECATED: This option is deprecated and '
+                                 'will be removed soon; use --include-list '
+                                 'which is functionally equivalent. If this '
+                                 'is specified at the same time as '
+                                 '--include-list, this flag will be ignored '
+                                 'and --include-list will be used')
+        parser.add_argument('--include-list', '--include_list',
+                            help="Path to an include file which contains the "
+                                 "regex for tests to be included in tempest "
+                                 "run, this file contains a separate regex on "
+                                 "each newline.")
         parser.add_argument('--blacklist-file', '--blacklist_file',
-                            help='Path to a blacklist file, this file '
-                                 'contains a separate regex exclude on '
-                                 'each newline')
+                            help='DEPRECATED: This option is deprecated and '
+                                 'will be removed soon; use --exclude-list '
+                                 'which is functionally equivalent. If this '
+                                 'is specified at the same time as '
+                                 '--exclude-list, this flag will be ignored '
+                                 'and --exclude-list will be used')
+        parser.add_argument('--exclude-list', '--exclude_list',
+                            help='Path to an exclude file which contains '
+                                 'regexes for tests to be excluded from a '
+                                 'tempest run; the file contains a separate '
+                                 'regex on each newline.')
         parser.add_argument('--load-list', '--load_list',
                             help='Path to a non-regex whitelist file, '
                                  'this file contains a separate test '
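
With the renamed options above, an exclude file and the corresponding
invocations could look like this (the file name and regexes are only
examples)::

    # exclude-list.txt: one regex per line, '#' starts a comment
    tempest\.scenario\..*
    tempest\.api\.compute\.admin\..*

    $ tempest run --regex '^tempest\.api' --exclude-list exclude-list.txt
    $ tempest run --smoke --exclude-regex '^tempest\.scenario'
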
diff --git a/tempest/cmd/subunit_describe_calls.py b/tempest/cmd/subunit_describe_calls.py
index 172fbaa..6c36d82 100644
--- a/tempest/cmd/subunit_describe_calls.py
+++ b/tempest/cmd/subunit_describe_calls.py
@@ -40,12 +40,14 @@
 
 subunit-describe-calls will take in either stdin subunit v1 or v2 stream or a
 file path which contains either a subunit v1 or v2 stream passed via the
---subunit parameter. This is then parsed checking for details contained in the
-file_bytes of the --non-subunit-name parameter (the default is pythonlogging
-which is what Tempest uses to store logs). By default the OpenStack Kilo
-release port defaults (http://bit.ly/22jpF5P) are used unless a file is
-provided via the --ports option. The resulting output is dumped in JSON output
-to the path provided in the --output-file option.
+``--subunit`` parameter. This is then parsed checking for details contained in
+the file_bytes of the ``--non-subunit-name`` parameter (the default is
+pythonlogging which is what Tempest uses to store logs). By default `the
+OpenStack default ports
+<https://docs.openstack.org/install-guide/firewalls-default-ports.html>`_
+are used unless a file is provided via the ``--ports`` option. The resulting
+output is dumped in JSON output to the path provided in the ``--output-file``
+option.
 
 Ports file JSON structure
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -110,9 +112,8 @@
     response_re = re.compile(r'.* Response - Headers: (?P<headers>.*)')
     body_re = re.compile(r'.*Body: (?P<body>.*)')
 
-    # Based on newton defaults:
-    # http://docs.openstack.org/newton/config-reference/
-    # firewalls-default-ports.html
+    # Based on OpenStack default ports:
+    # https://docs.openstack.org/install-guide/firewalls-default-ports.html
     services = {
         "8776": "Block Storage",
         "8774": "Nova",
@@ -131,7 +132,10 @@
         "3260": "iSCSI",
         "3306": "MySQL",
         "5672": "AMQP",
-        "8082": "murano"}
+        "8082": "murano",
+        "8778": "Clustering",
+        "8999": "Vitrage",
+        "8989": "Mistral"}
 
     def __init__(self, services=None):
         super(UrlParser, self).__init__()
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 235d8e3..0db1ab1 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -202,22 +202,8 @@
                             not CONF.identity_feature_enabled.api_v3, update)
 
 
-def verify_cinder_api_versions(os, update):
-    # Check cinder api versions
-    versions = _get_api_versions(os, 'cinder')
-    if (CONF.volume_feature_enabled.api_v2 !=
-            contains_version('v2.', versions)):
-        print_and_or_update('api_v2', 'volume-feature-enabled',
-                            not CONF.volume_feature_enabled.api_v2, update)
-    if (CONF.volume_feature_enabled.api_v3 !=
-            contains_version('v3.', versions)):
-        print_and_or_update('api_v3', 'volume-feature-enabled',
-                            not CONF.volume_feature_enabled.api_v3, update)
-
-
 def verify_api_versions(os, service, update):
     verify = {
-        'cinder': verify_cinder_api_versions,
         'glance': verify_glance_api_versions,
         'keystone': verify_keystone_api_versions,
     }
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index edb9d16..a062f6f 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -18,9 +18,7 @@
 import ssl
 import struct
 import textwrap
-
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from oslo_log import log as logging
 from oslo_utils import excutils
@@ -31,11 +29,6 @@
 from tempest.lib.common import rest_client
 from tempest.lib.common.utils import data_utils
 
-if six.PY2:
-    ord_func = ord
-else:
-    ord_func = int
-
 CONF = config.CONF
 
 LOG = logging.getLogger(__name__)
@@ -64,7 +57,7 @@
 def create_test_server(clients, validatable=False, validation_resources=None,
                        tenant_network=None, wait_until=None,
                        volume_backed=False, name=None, flavor=None,
-                       image_id=None, **kwargs):
+                       image_id=None, wait_for_sshable=True, **kwargs):
     """Common wrapper utility returning a test server.
 
     This method is a common wrapper returning a test server that can be
@@ -100,6 +93,8 @@
         CONF.compute.flavor_ref will be used instead.
     :param image_id: ID of the image to be used to provision the server. If not
         defined, CONF.compute.image_ref will be used instead.
+    :param wait_for_sshable: Check the server's console log and wait until
+        it is ready to log in.
     :returns: a tuple
     """
 
@@ -270,6 +265,10 @@
                             LOG.exception('Server %s failed to delete in time',
                                           server['id'])
 
+    if (validatable and CONF.compute_feature_enabled.console_output and
+            wait_for_sshable):
+        waiters.wait_for_guest_os_boot(clients.servers_client, server['id'])
+
     return body, servers
 
 
@@ -365,8 +364,8 @@
             # frames less than 125 bytes here (for the negotiation) and
             # that only the 2nd byte contains the length, and since the
             # server doesn't do masking, we can just read the data length
-            if ord_func(header[1]) & 127 > 0:
-                return self._recv(ord_func(header[1]) & 127)
+            if int(header[1]) & 127 > 0:
+                return self._recv(int(header[1]) & 127)
 
     def send_frame(self, data):
         """Wrapper for sending data to add in the WebSocket frame format."""
@@ -383,7 +382,7 @@
             frame_bytes.append(mask[i])
         # Mask each of the actual data bytes that we are going to send
         for i in range(len(data)):
-            frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
+            frame_bytes.append(int(data[i]) ^ mask[i % 4])
         # Convert our integer list to a binary array of bytes
         frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
         self._socket.sendall(frame_bytes)
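
The new ``wait_for_sshable`` flag defaults to True and only takes effect for
validatable servers when ``[compute-feature-enabled]/console_output`` is
enabled. A minimal sketch of opting out for a server that will never be
logged into (``self.os_primary`` and the validation resources are assumed to
be set up by the usual tempest test machinery)::

    from tempest.common import compute

    # Skip polling the console log for a 'login:' prompt; the server is
    # still waited on until it reaches ACTIVE.
    body, servers = compute.create_test_server(
        self.os_primary,
        validatable=True,
        validation_resources=validation_resources,  # assumed prepared earlier
        wait_until='ACTIVE',
        wait_for_sshable=False)
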
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index c6e5dcb..2d486a7 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -245,6 +245,9 @@
 
     if identity_version == 'v3':
         conf_attributes.append('domain_name')
+        conf_attributes.append('user_domain_name')
+        conf_attributes.append('project_domain_name')
+        conf_attributes.append('system')
     # Read the parts of credentials from config
     params = config.service_client_config()
     for attr in conf_attributes:
@@ -284,7 +287,8 @@
     if identity_version == 'v3':
         domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
                             if 'domain' in x)
-        if not domain_fields.intersection(kwargs.keys()):
+        if (not params.get('system') and
+                not domain_fields.intersection(kwargs.keys())):
             domain_name = CONF.auth.default_credentials_domain_name
             # NOTE(andreaf) Setting domain_name implicitly sets user and
             # project domain names, if they are None
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 167bf5b..88a16b7 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -13,37 +13,17 @@
 #    under the License.
 
 import functools
-from functools import partial
 
 import testtools
 
 from tempest import config
 from tempest.exceptions import InvalidServiceTag
-from tempest.lib.common.utils import data_utils as lib_data_utils
 from tempest.lib import decorators
 
 
 CONF = config.CONF
 
 
-class DataUtils(object):
-    def __getattr__(self, attr):
-
-        if attr == 'rand_name':
-            # NOTE(flwang): This is a proxy to generate a random name that
-            # includes a random number and a prefix 'tempest'
-            attr_obj = partial(lib_data_utils.rand_name,
-                               prefix='tempest')
-        else:
-            attr_obj = getattr(lib_data_utils, attr)
-
-        self.__dict__[attr] = attr_obj
-        return attr_obj
-
-
-data_utils = DataUtils()
-
-
 def get_service_list():
     service_list = {
         'compute': CONF.service_available.nova,
@@ -59,6 +39,7 @@
         # So we should set this True here.
         'identity': True,
         'object_storage': CONF.service_available.swift,
+        'dashboard': CONF.service_available.horizon,
     }
     return service_list
 
@@ -128,3 +109,18 @@
     if extension_name in config_dict[service]:
         return True
     return False
+
+
+def is_network_feature_enabled(feature_name):
+    """A function that will check the list of available network features
+
+    """
+    list_of_features = CONF.network_feature_enabled.available_features
+
+    if not list_of_features:
+        return False
+    if list_of_features[0] == 'all':
+        return True
+    if feature_name in list_of_features:
+        return True
+    return False
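
A short sketch of guarding a test on a feature that is not discoverable
through the API, using the helper added above from inside a test method (the
feature name is only an example)::

    from tempest.common import utils

    if not utils.is_network_feature_enabled('port_trunking'):
        raise self.skipException(
            'port_trunking is not listed in '
            '[network-feature-enabled]/available_features')
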
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 14790d6..3750b11 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -124,12 +124,18 @@
             raise lib_exc.DeleteErrorException(
                 "Server %s failed to delete and is in ERROR status" %
                 server_id)
+
         if server_status == 'SOFT_DELETED':
             # Soft-deleted instances need to be forcibly deleted to
             # prevent some test cases from failing.
             LOG.debug("Automatically force-deleting soft-deleted server %s",
                       server_id)
-            client.force_delete_server(server_id)
+            try:
+                client.force_delete_server(server_id)
+            except lib_exc.NotFound:
+                # The instance may have been deleted so ignore
+                # NotFound exception
+                return
 
         if int(time.time()) - start_time >= client.build_timeout:
             raise lib_exc.TimeoutException
@@ -187,6 +193,91 @@
     raise lib_exc.TimeoutException(message)
 
 
+def wait_for_image_tasks_status(client, image_id, status):
+    """Waits for an image tasks to reach a given status."""
+    pending_tasks = []
+    start = int(time.time())
+    while int(time.time()) - start < client.build_timeout:
+        tasks = client.show_image_tasks(image_id)['tasks']
+
+        pending_tasks = [task for task in tasks if task['status'] != status]
+        if not pending_tasks:
+            return tasks
+        time.sleep(client.build_interval)
+
+    message = ('Image %(image_id)s tasks: %(pending_tasks)s '
+               'failed to reach %(status)s state within the required '
+               'time (%(timeout)s s).' % {'image_id': image_id,
+                                          'pending_tasks': pending_tasks,
+                                          'status': status,
+                                          'timeout': client.build_timeout})
+    caller = test_utils.find_test_caller()
+    if caller:
+        message = '(%s) %s' % (caller, message)
+    raise lib_exc.TimeoutException(message)
+
+
+def wait_for_image_imported_to_stores(client, image_id, stores=None):
+    """Waits for an image to be imported to all requested stores.
+
+    Short-circuits to fail if the server reports failure of any store.
+    If stores is None, just wait for status==active.
+
+    The client should also have build_interval and build_timeout attributes.
+    """
+
+    exc_cls = lib_exc.TimeoutException
+    start = int(time.time())
+    while int(time.time()) - start < client.build_timeout:
+        image = client.show_image(image_id)
+        if image['status'] == 'active' and (stores is None or
+                                            image['stores'] == stores):
+            return
+        if image.get('os_glance_failed_import'):
+            exc_cls = lib_exc.OtherRestClientException
+            break
+
+        time.sleep(client.build_interval)
+
+    message = ('Image %s failed to import on stores: %s' %
+               (image_id, str(image.get('os_glance_failed_import'))))
+    caller = test_utils.find_test_caller()
+    if caller:
+        message = '(%s) %s' % (caller, message)
+    raise exc_cls(message)
+
+
+def wait_for_image_copied_to_stores(client, image_id):
+    """Waits for an image to be copied on all requested stores.
+
+    The client should also have build_interval and build_timeout attributes.
+    This return the list of stores where copy is failed.
+    """
+
+    start = int(time.time())
+    store_left = []
+    while int(time.time()) - start < client.build_timeout:
+        image = client.show_image(image_id)
+        store_left = image.get('os_glance_importing_to_stores')
+        # NOTE(danms): If os_glance_importing_to_stores is None, then
+        # we've raced with the startup of the task and should continue
+        # to wait.
+        if store_left is not None and not store_left:
+            return image['os_glance_failed_import']
+        if image['status'].lower() == 'killed':
+            raise exceptions.ImageKilledException(image_id=image_id,
+                                                  status=image['status'])
+
+        time.sleep(client.build_interval)
+
+    message = ('Image %s failed to finish the copy operation '
+               'on stores: %s' % (image_id, str(store_left)))
+    caller = test_utils.find_test_caller()
+    if caller:
+        message = '(%s) %s' % (caller, message)
+    raise lib_exc.TimeoutException(message)
+
+
 def wait_for_volume_resource_status(client, resource_id, status):
     """Waits for a volume resource to reach a given status.
 
@@ -223,6 +314,25 @@
              resource_name, resource_id, status, time.time() - start)
 
 
+def wait_for_volume_attachment_create(client, volume_id, server_id):
+    """Waits for a volume attachment to be created at a given volume."""
+    start = int(time.time())
+    while True:
+        attachments = client.show_volume(volume_id)['volume']['attachments']
+        found = [a for a in attachments if a['server_id'] == server_id]
+        if found:
+            LOG.info('Attachment %s created for volume %s to server %s after '
+                     'waiting for %f seconds', found[0]['attachment_id'],
+                     volume_id, server_id, time.time() - start)
+            return found[0]
+        time.sleep(client.build_interval)
+        if int(time.time()) - start >= client.build_timeout:
+            message = ('Failed to attach volume %s to server %s '
+                       'within the required time (%s s).' %
+                       (volume_id, server_id, client.build_timeout))
+            raise lib_exc.TimeoutException(message)
+
+
 def wait_for_volume_attachment_remove(client, volume_id, attachment_id):
     """Waits for a volume attachment to be removed from a given volume."""
     start = int(time.time())
@@ -239,6 +349,32 @@
              'seconds', attachment_id, volume_id, time.time() - start)
 
 
+def wait_for_volume_attachment_remove_from_server(
+        client, server_id, volume_id):
+    """Waits for a volume to be removed from a given server.
+
+    This waiter polls the compute API until the volume attachment is gone.
+    """
+    start = int(time.time())
+    volumes = client.list_volume_attachments(server_id)['volumeAttachments']
+
+    while any(volume for volume in volumes if volume['volumeId'] == volume_id):
+        time.sleep(client.build_interval)
+
+        timed_out = int(time.time()) - start >= client.build_timeout
+        if timed_out:
+            message = ('Volume %s failed to detach from server %s within '
+                       'the required time (%s s) from the compute API '
+                       'perspective' %
+                       (volume_id, server_id, client.build_timeout))
+            raise lib_exc.TimeoutException(message)
+
+        volumes = client.list_volume_attachments(server_id)[
+            'volumeAttachments']
+
+    return volumes
+
+
 def wait_for_volume_migration(client, volume_id, new_host):
     """Waits for a Volume to move to a new host."""
     body = client.show_volume(volume_id)['volume']
@@ -359,3 +495,20 @@
                        'the required time (%s s)' % (port_id, server_id,
                                                      client.build_timeout))
             raise lib_exc.TimeoutException(message)
+
+
+def wait_for_guest_os_boot(client, server_id):
+    start_time = int(time.time())
+    while True:
+        console_output = client.get_console_output(server_id)['output']
+        for line in console_output.split('\n'):
+            if 'login:' in line.lower():
+                return
+        if int(time.time()) - start_time >= client.build_timeout:
+            LOG.info("Guest OS on server %s probably isn't ready or its "
+                     "console log can't be parsed properly. If guest OS "
+                     "isn't ready, that may cause problems with SSH to "
+                     "the server.",
+                     server_id)
+            return
+        time.sleep(client.build_interval)
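
A minimal sketch of pairing the new attachment waiters around an
attach/detach cycle; the client attributes follow the usual tempest
test-class conventions and are assumed to be available::

    from tempest.common import waiters

    # Attach, then wait until the attachment shows up on the volume.
    self.servers_client.attach_volume(server['id'], volumeId=volume['id'])
    attachment = waiters.wait_for_volume_attachment_create(
        self.volumes_client, volume['id'], server['id'])

    # Detach, then wait until the compute API stops reporting it.
    self.servers_client.detach_volume(server['id'], volume['id'])
    waiters.wait_for_volume_attachment_remove_from_server(
        self.servers_client, server['id'], volume['id'])
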
diff --git a/tempest/config.py b/tempest/config.py
index 989b8ef..c409db6 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -92,7 +92,24 @@
     cfg.StrOpt('admin_domain_name',
                default='Default',
                help="Admin domain name for authentication (Keystone V3). "
-                    "The same domain applies to user and project"),
+                    "The same domain applies to user and project if "
+                    "admin_user_domain_name and admin_project_domain_name "
+                    "are not specified"),
+    cfg.StrOpt('admin_user_domain_name',
+               help="Domain name that contains the admin user (Keystone V3). "
+                    "May be different from admin_project_domain_name and "
+                    "admin_domain_name"),
+    cfg.StrOpt('admin_project_domain_name',
+               help="Domain name that contains the project given by "
+                    "admin_project_name (Keystone V3). May be different from "
+                    "admin_user_domain_name and admin_domain_name"),
+    cfg.StrOpt('admin_system',
+               default=None,
+               help="The system scope on which an admin user has an admin "
+                    "role assignment, if any. Valid values are 'all' or None. "
+                    "This must be set to 'all' if using the "
+                    "[oslo_policy]/enforce_scope=true option for the "
+                    "identity service."),
 ]
 
 identity_group = cfg.OptGroup(name='identity',
@@ -250,6 +267,11 @@
                 default=False,
                 help='Does the environment have application credentials '
                      'enabled?'),
+    # Access rules for application credentials are a default feature in Train.
+    # This config option can be removed once Stein is EOL.
+    cfg.BoolOpt('access_rules',
+                default=False,
+                help='Does the environment have access rules enabled?'),
     cfg.BoolOpt('immutable_user_source',
                 default=False,
                 help='Set to True if the environment has a read-only '
@@ -447,6 +469,10 @@
     cfg.BoolOpt('shelve',
                 default=True,
                 help="Does the test environment support shelving/unshelving?"),
+    cfg.BoolOpt('shelve_migrate',
+                default=False,
+                help="Does the test environment support "
+                     "cold migration of unshelved server?"),
     cfg.BoolOpt('suspend',
                 default=True,
                 help="Does the test environment support suspend/resume?"),
@@ -598,6 +624,22 @@
                 help='Does the test environment support attaching a volume to '
                      'more than one instance? This depends on hypervisor and '
                      'volume backend/type and compute API version 2.60.'),
+    cfg.BoolOpt('xenapi_apis',
+                default=False,
+                help='Does the test environment support the XenAPI-specific '
+                     'APIs: os-agents, writeable server metadata and the '
+                     'resetNetwork server action? '
+                     'These were removed in Victoria alongside the XenAPI '
+                     'virt driver.',
+                deprecated_for_removal=True,
+                deprecated_reason="On Nova side, XenAPI virt driver and the "
+                                  "APIs that only worked with that driver "
+                                  "have been removed and there's nothing to "
+                                  "test after Ussuri."),
+    cfg.BoolOpt('ide_bus',
+                default=True,
+                help='Does the test environment support attaching devices '
+                     'using an IDE bus to the instance?'),
 ]
 
 
@@ -619,6 +661,15 @@
                choices=['public', 'admin', 'internal',
                         'publicURL', 'adminURL', 'internalURL'],
                help="The endpoint type to use for the image service."),
+    cfg.StrOpt('alternate_image_endpoint',
+               default=None,
+               help="Alternate endpoint name for cross-worker testing"),
+    cfg.StrOpt('alternate_image_endpoint_type',
+               default='publicURL',
+               choices=['public', 'admin', 'internal',
+                        'publicURL', 'adminURL', 'internalURL'],
+               help=("The endpoint type to use for the alternate image "
+                     "service.")),
     cfg.StrOpt('http_image',
                default='http://download.cirros-cloud.net/0.3.1/'
                'cirros-0.3.1-x86_64-uec.tar.gz',
@@ -668,6 +719,13 @@
     cfg.BoolOpt('import_image',
                 default=False,
                 help="Is image import feature enabled"),
+    # NOTE(danms): Starting mid-Wallaby glance began enforcing the
+    # previously-informal requirement that os_glance_* properties are
+    # reserved for internal use. Thus, we can only run these checks
+    # if we know we are on a new enough glance.
+    cfg.BoolOpt('os_glance_reserved',
+                default=False,
+                help="Should we check that os_glance namespace is reserved"),
 ]
 
 network_group = cfg.OptGroup(name='network',
@@ -740,11 +798,13 @@
                 deprecated_reason="This config option is no longer "
                                   "used anywhere, so it can be removed."),
     cfg.StrOpt('port_vnic_type',
-               choices=[None, 'normal', 'direct', 'macvtap'],
+               choices=[None, 'normal', 'direct', 'macvtap', 'direct-physical',
+                        'baremetal', 'virtio-forwarder'],
                help="vnic_type to use when launching instances"
                     " with pre-configured ports."
                     " Supported ports are:"
-                    " ['normal','direct','macvtap']"),
+                    " ['normal', 'direct', 'macvtap', 'direct-physical', "
+                    "'baremetal', 'virtio-forwarder']"),
     cfg.Opt('port_profile',
             type=ProfileType,
             default={},
@@ -766,29 +826,37 @@
 NetworkFeaturesGroup = [
     cfg.BoolOpt('ipv6',
                 default=True,
-                help="Allow the execution of IPv6 tests"),
+                help="Allow the execution of IPv6 tests."),
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help="A list of enabled network extensions with a special "
                      "entry all which indicates every extension is enabled. "
                      "Empty list indicates all extensions are disabled. "
-                     "To get the list of extensions run: 'neutron ext-list'"),
+                     "To get the list of extensions run: "
+                     "'openstack extension list --network'"),
+    cfg.ListOpt('available_features',
+                default=['all'],
+                help="A list of available network features with a special "
+                     "entry all that indicates every feature is available. "
+                     "Empty list indicates all features are disabled. "
+                     "This list can contain features that are not "
+                     "discoverable through the API."),
     cfg.BoolOpt('ipv6_subnet_attributes',
                 default=False,
                 help="Allow the execution of IPv6 subnet tests that use "
                      "the extended IPv6 attributes ipv6_ra_mode "
-                     "and ipv6_address_mode"
+                     "and ipv6_address_mode."
                 ),
     cfg.BoolOpt('port_admin_state_change',
                 default=True,
-                help="Does the test environment support changing"
-                     " port admin state"),
+                help="Does the test environment support changing "
+                     "port admin state?"),
     cfg.BoolOpt('port_security',
                 default=False,
                 help="Does the test environment support port security?"),
     cfg.BoolOpt('floating_ips',
                 default=True,
-                help='Does the test environment support floating_ips'),
+                help='Does the test environment support floating_ips?'),
     cfg.StrOpt('qos_placement_physnet', default=None,
                help='Name of the physnet for placement based minimum '
                     'bandwidth allocation.'),
@@ -797,6 +865,18 @@
                     'This value will be increased in case of conflict.')
 ]
 
+dashboard_group = cfg.OptGroup(name="dashboard",
+                               title="Dashboard options")
+
+DashboardGroup = [
+    cfg.StrOpt('dashboard_url',
+               default='http://localhost/',
+               help="Where the dashboard can be found"),
+    cfg.BoolOpt('disable_ssl_certificate_validation',
+                default=False,
+                help="Set to True if using self-signed SSL certificates."),
+]
+
 validation_group = cfg.OptGroup(name='validation',
                                 title='SSH Validation options')
 
@@ -841,10 +921,17 @@
     cfg.StrOpt('image_ssh_user',
                default="root",
                help="User name used to authenticate to an instance."),
+    cfg.StrOpt('image_alt_ssh_user',
+               default="root",
+               help="User name used to authenticate to an alt instance."),
     cfg.StrOpt('image_ssh_password',
                default="password",
                help="Password used to authenticate to an instance.",
                secret=True),
+    cfg.StrOpt('image_alt_ssh_password',
+               default="password",
+               help="Password used to authenticate to an alt instance.",
+               secret=True),
     cfg.StrOpt('ssh_shell_prologue',
                default="set -eu -o pipefail; PATH=$$PATH:/sbin:/usr/sbin;",
                help="Shell fragments to use before executing a command "
@@ -898,6 +985,9 @@
                 default=['BACKEND_1', 'BACKEND_2'],
                 help='A list of backend names separated by comma. '
                      'The backend name must be declared in cinder.conf'),
+    cfg.StrOpt('volume_type',
+               default='',
+               help='Volume type to be used while creating volume.'),
     cfg.StrOpt('storage_protocol',
                default='iSCSI',
                help='Backend protocol to target when creating volume types'),
@@ -969,38 +1059,21 @@
                 help='A list of enabled volume extensions with a special '
                      'entry all which indicates every extension is enabled. '
                      'Empty list indicates all extensions are disabled'),
-    cfg.BoolOpt('api_v2',
-                default=True,
-                help="Is the v2 volume API enabled",
-                deprecated_for_removal=True,
-                deprecated_reason="The v2 volume API has been deprecated "
-                                  "since Pike release. Now Tempest run all "
-                                  "the volume tests against v2 or v3 API "
-                                  "based on CONF.volume.catalog_type which "
-                                  "makes this config option unusable. If "
-                                  "catalog_type is volumev2, then all the "
-                                  "volume tests will run against v2 API. "
-                                  "Use ``CONF.volume.catalog_type`` to run "
-                                  "the Tempest against volume v2 or v3 API"),
-    cfg.BoolOpt('api_v3',
-                default=True,
-                help="Is the v3 volume API enabled",
-                deprecated_for_removal=True,
-                deprecated_reason="Tempest run all the volume tests against "
-                                  "v2 or v3 API based on "
-                                  "CONF.volume.catalog_type which makes this "
-                                  "config option unusable. If catalog_type is "
-                                  "volumev3 which is default, then all the "
-                                  "volume tests will run against v3 API. "
-                                  "Use ``CONF.volume.catalog_type`` to run "
-                                  "the Tempest against volume v2 or v3 API"),
     cfg.BoolOpt('extend_attached_volume',
                 default=False,
                 help='Does the cloud support extending the size of a volume '
                      'which is currently attached to a server instance? This '
                      'depends on the 3.42 volume API microversion and the '
                      '2.51 compute API microversion. Also, not all volume or '
-                     'compute backends support this operation.')
+                     'compute backends support this operation.'),
+    cfg.BoolOpt('extend_attached_encrypted_volume',
+                default=False,
+                help='Does the cloud support extending the size of an '
+                     'encrypted volume which is currently attached to a '
+                     'server instance? This depends on the 3.42 volume API '
+                     'microversion and the 2.51 compute API microversion. '
+                     'Also, not all volume or compute backends support this '
+                     'operation.')
 ]
 
 
@@ -1075,12 +1148,6 @@
 scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')
 
 ScenarioGroup = [
-    cfg.StrOpt('img_dir',
-               default='/opt/stack/new/devstack/files/images/'
-               'cirros-0.3.1-x86_64-uec',
-               help='Directory containing image files, this has been '
-                    'deprecated - img_file option contains a full path now.',
-               deprecated_for_removal=True),
     cfg.StrOpt('img_file', deprecated_name='qcow2_img_file',
                default='/opt/stack/new/devstack/files/images'
                '/cirros-0.3.1-x86_64-disk.img',
@@ -1127,6 +1194,42 @@
     cfg.BoolOpt('nova',
                 default=True,
                 help="Whether or not nova is expected to be available"),
+    cfg.BoolOpt('horizon',
+                default=True,
+                help="Whether or not horizon is expected to be available"),
+]
+
+enforce_scope_group = cfg.OptGroup(name="enforce_scope",
+                                   title="OpenStack Services with "
+                                         "enforce scope")
+
+
+EnforceScopeGroup = [
+    cfg.BoolOpt('nova',
+                default=False,
+                help='Do the compute service API policies enforce scope? '
+                     'This configuration value should be the same as the '
+                     'nova.conf: [oslo_policy].enforce_scope option.'),
+    cfg.BoolOpt('neutron',
+                default=False,
+                help='Do the network service API policies enforce scope? '
+                     'This configuration value should be the same as the '
+                     'neutron.conf: [oslo_policy].enforce_scope option.'),
+    cfg.BoolOpt('glance',
+                default=False,
+                help='Do the Image service API policies enforce scope? '
+                     'This configuration value should be the same as the '
+                     'glance.conf: [oslo_policy].enforce_scope option.'),
+    cfg.BoolOpt('cinder',
+                default=False,
+                help='Do the Volume service API policies enforce scope? '
+                     'This configuration value should be the same as the '
+                     'cinder.conf: [oslo_policy].enforce_scope option.'),
+    cfg.BoolOpt('keystone',
+                default=False,
+                help='Do the Identity service API policies enforce scope? '
+                     'This configuration value should be the same as the '
+                     'keystone.conf: [oslo_policy].enforce_scope option.'),
 ]
 
 debug_group = cfg.OptGroup(name="debug",
@@ -1174,7 +1277,7 @@
 
 The best use case is investigating used resources of one test.
 A test can be run as follows:
- $ ostestr --pdb TEST_ID
+ $ stestr run --pdb TEST_ID
 or
  $ python -m testtools.run TEST_ID"""),
 ]
@@ -1190,6 +1293,7 @@
     (image_feature_group, ImageFeaturesGroup),
     (network_group, NetworkGroup),
     (network_feature_group, NetworkFeaturesGroup),
+    (dashboard_group, DashboardGroup),
     (validation_group, ValidationGroup),
     (volume_group, VolumeGroup),
     (volume_feature_group, VolumeFeaturesGroup),
@@ -1197,6 +1301,7 @@
     (object_storage_feature_group, ObjectStoreFeaturesGroup),
     (scenario_group, ScenarioGroup),
     (service_available_group, ServiceAvailableGroup),
+    (enforce_scope_group, EnforceScopeGroup),
     (debug_group, DebugGroup),
     (placement_group, PlacementGroup),
     (profiler_group, ProfilerGroup),
@@ -1257,6 +1362,7 @@
         self.image_feature_enabled = _CONF['image-feature-enabled']
         self.network = _CONF.network
         self.network_feature_enabled = _CONF['network-feature-enabled']
+        self.dashboard = _CONF.dashboard
         self.validation = _CONF.validation
         self.volume = _CONF.volume
         self.volume_feature_enabled = _CONF['volume-feature-enabled']
@@ -1265,6 +1371,7 @@
             'object-storage-feature-enabled']
         self.scenario = _CONF.scenario
         self.service_available = _CONF.service_available
+        self.enforce_scope = _CONF.enforce_scope
         self.debug = _CONF.debug
         logging.tempest_set_log_file('tempest.log')
         # Setting attributes for plugins
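
With the dashboard and enforce_scope groups registered above and exposed as CONF.dashboard and CONF.enforce_scope, test code can branch on per-service scope enforcement. A small hypothetical sketch (only the option names come from this change; the helper and its policy assumption are illustrative):

# Illustrative helper; assumes a loaded Tempest configuration.
from tempest import config

CONF = config.CONF


def admin_credential_scope(service):
    # When a service enforces scope, admin-level API calls may need a
    # system-scoped token rather than a project-scoped one.
    if getattr(CONF.enforce_scope, service, False):
        return 'system'
    return 'project'


# e.g. admin_credential_scope('keystone') -> 'system' when
# [enforce_scope]/keystone = True in tempest.conf
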
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 6a97a00..c1e6b2d 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -140,20 +140,10 @@
                "decorators.skip_because from tempest.lib")
 
 
-def _common_service_clients_check(logical_line, physical_line, filename,
-                                  ignored_list_file=None):
+def _common_service_clients_check(logical_line, physical_line, filename):
     if not re.match('tempest/(lib/)?services/.*', filename):
         return False
 
-    if ignored_list_file is not None:
-        ignored_list = []
-        with open('tempest/hacking/' + ignored_list_file) as f:
-            for line in f:
-                ignored_list.append(line.strip())
-
-        if filename in ignored_list:
-            return False
-
     if not METHOD.match(physical_line):
         return False
 
@@ -171,7 +161,7 @@
     T110
     """
     if not _common_service_clients_check(logical_line, physical_line,
-                                         filename, 'ignored_list_T110.txt'):
+                                         filename):
         return
 
     for line in lines[line_number:]:
@@ -199,7 +189,7 @@
     T111
     """
     if not _common_service_clients_check(logical_line, physical_line,
-                                         filename, 'ignored_list_T111.txt'):
+                                         filename):
         return
 
     for line in lines[line_number:]:
diff --git a/tempest/lib/api_schema/response/compute/v2_70/interfaces.py b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
new file mode 100644
index 0000000..3160b92
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import interfaces
+
+# ****** Schemas changed in microversion 2.70 *****************
+#
+# 1. add optional field 'tag' in the Response body of the following APIs:
+#    - GET /servers/{server_id}/os-interface
+#    - POST /servers/{server_id}/os-interface
+#    - GET /servers/{server_id}/os-interface/{port_id}
+
+get_create_interfaces = copy.deepcopy(interfaces.get_create_interfaces)
+get_create_interfaces['response_body']['properties']['interfaceAttachment'][
+    'properties'].update({'tag': {'type': ['string', 'null']}})
+
+list_interfaces = copy.deepcopy(interfaces.list_interfaces)
+list_interfaces['response_body']['properties']['interfaceAttachments'][
+    'items']['properties'].update({'tag': {'type': ['string', 'null']}})
+
+# NOTE(zhufl): Below are the schemas unchanged in this microversion. We need
+# to keep them in this file so that the service client can pick the right
+# schema in a generic way from its self.schema_versions_info mapping.
+# ****** Schemas unchanged since microversion 2.1 ***
+delete_interface = copy.deepcopy(interfaces.delete_interface)
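
The NOTE above depends on the service clients' generic schema selection by microversion. The following self-contained sketch (not Tempest's actual client code; the helper names and mapping values are made up) shows the idea of walking a schema_versions_info-style mapping:

# Toy stand-in for the schema selection done by the service clients.


def _parse(version):
    return [int(part) for part in version.split('.')]


def _in_range(version, minimum, maximum):
    if minimum is not None and _parse(version) < _parse(minimum):
        return False
    if maximum is not None and _parse(version) > _parse(maximum):
        return False
    return True


def select_schema(schema_versions_info, microversion):
    for entry in schema_versions_info:
        if _in_range(microversion, entry['min'], entry['max']):
            return entry['schema']
    raise ValueError('No schema defined for microversion %s' % microversion)


schema_versions_info = [
    {'min': None, 'max': '2.69', 'schema': 'v2_1.interfaces'},
    {'min': '2.70', 'max': None, 'schema': 'v2_70.interfaces'},
]

assert select_schema(schema_versions_info, '2.1') == 'v2_1.interfaces'
assert select_schema(schema_versions_info, '2.70') == 'v2_70.interfaces'
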
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
index 5cf0f8a..f4c01ee 100644
--- a/tempest/lib/api_schema/response/compute/v2_71/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -79,3 +79,6 @@
 check_tag_existence = copy.deepcopy(servers270.check_tag_existence)
 update_tag = copy.deepcopy(servers270.update_tag)
 delete_tag = copy.deepcopy(servers270.delete_tag)
+attach_volume = copy.deepcopy(servers270.attach_volume)
+show_volume_attachment = copy.deepcopy(servers270.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers270.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
index 6e491e9..ae7ebc4 100644
--- a/tempest/lib/api_schema/response/compute/v2_73/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -76,3 +76,6 @@
 check_tag_existence = copy.deepcopy(servers271.check_tag_existence)
 update_tag = copy.deepcopy(servers271.update_tag)
 delete_tag = copy.deepcopy(servers271.delete_tag)
+attach_volume = copy.deepcopy(servers271.attach_volume)
+show_volume_attachment = copy.deepcopy(servers271.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers271.list_volume_attachments)
diff --git a/tempest/tests/lib/services/volume/v1/__init__.py b/tempest/lib/api_schema/response/compute/v2_79/__init__.py
similarity index 100%
rename from tempest/tests/lib/services/volume/v1/__init__.py
rename to tempest/lib/api_schema/response/compute/v2_79/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_79/servers.py b/tempest/lib/api_schema/response/compute/v2_79/servers.py
new file mode 100644
index 0000000..58dcba8
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_79/servers.py
@@ -0,0 +1,67 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_73 import servers as servers273
+
+
+###########################################################################
+#
+# 2.79:
+#
+# The delete_on_termination parameter is now returned in the response body
+# of the following calls:
+#
+# - GET /servers/{server_id}/os-volume_attachments
+# - GET /servers/{server_id}/os-volume_attachments/{volume_id}
+# - POST /servers/{server_id}/os-volume_attachments
+###########################################################################
+
+attach_volume = copy.deepcopy(servers273.attach_volume)
+attach_volume['response_body']['properties']['volumeAttachment'][
+    'properties'].update({'delete_on_termination': {'type': 'boolean'}})
+attach_volume['response_body']['properties']['volumeAttachment'][
+    'required'].append('delete_on_termination')
+
+show_volume_attachment = copy.deepcopy(servers273.show_volume_attachment)
+show_volume_attachment['response_body']['properties']['volumeAttachment'][
+    'properties'].update({'delete_on_termination': {'type': 'boolean'}})
+show_volume_attachment['response_body']['properties'][
+    'volumeAttachment']['required'].append('delete_on_termination')
+
+list_volume_attachments = copy.deepcopy(servers273.list_volume_attachments)
+list_volume_attachments['response_body']['properties']['volumeAttachments'][
+    'items']['properties'].update(
+        {'delete_on_termination': {'type': 'boolean'}})
+list_volume_attachments['response_body']['properties'][
+    'volumeAttachments']['items']['required'].append('delete_on_termination')
+
+# NOTE(zhufl): Below are the schemas unchanged in this microversion. We
+# need to keep them in this file so that the service client can pick the
+# right schema in a generic way from its self.schema_versions_info mapping.
+# ****** Schemas unchanged since microversion 2.73 ***
+rebuild_server = copy.deepcopy(servers273.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers273.rebuild_server_with_admin_pass)
+update_server = copy.deepcopy(servers273.update_server)
+get_server = copy.deepcopy(servers273.get_server)
+list_servers_detail = copy.deepcopy(servers273.list_servers_detail)
+list_servers = copy.deepcopy(servers273.list_servers)
+show_server_diagnostics = copy.deepcopy(servers273.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers273.get_remote_consoles)
+list_tags = copy.deepcopy(servers273.list_tags)
+update_all_tags = copy.deepcopy(servers273.update_all_tags)
+delete_all_tags = copy.deepcopy(servers273.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers273.check_tag_existence)
+update_tag = copy.deepcopy(servers273.update_tag)
+delete_tag = copy.deepcopy(servers273.delete_tag)
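
To illustrate what the 2.79 additions check at validation time, here is a self-contained sketch that validates a sample volumeAttachment body against a reduced schema mirroring the delete_on_termination requirement. The sample data is made up; Tempest's real schema also covers id, device, links, and so on.

# Reduced illustration of the 2.79 change: delete_on_termination is both
# described and required.
import jsonschema

volume_attachment_279 = {
    'type': 'object',
    'properties': {
        'volumeAttachment': {
            'type': 'object',
            'properties': {
                'volumeId': {'type': 'string'},
                'serverId': {'type': 'string'},
                'delete_on_termination': {'type': 'boolean'},
            },
            'required': ['volumeId', 'serverId', 'delete_on_termination'],
        },
    },
    'required': ['volumeAttachment'],
}

body = {'volumeAttachment': {'volumeId': 'vol-1', 'serverId': 'srv-1',
                             'delete_on_termination': True}}
jsonschema.Draft4Validator(volume_attachment_279).validate(body)
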
diff --git a/tempest/lib/api_schema/response/volume/backups.py b/tempest/lib/api_schema/response/volume/backups.py
index 9e85f5f..cba7981 100644
--- a/tempest/lib/api_schema/response/volume/backups.py
+++ b/tempest/lib/api_schema/response/volume/backups.py
@@ -66,7 +66,7 @@
                 'properties': {
                     'id': {'type': 'string', 'format': 'uuid'},
                     'links': parameter_types.links,
-                    'name': {'type': 'string'},
+                    'name': {'type': ['string', 'null']},
                     # TODO(zhufl): metadata is added in 3.43, we should move it
                     # to the 3.43 schema file when microversion is supported
                     # in volume interfaces.
@@ -91,7 +91,7 @@
                 'properties': {
                     'id': {'type': 'string', 'format': 'uuid'},
                     'links': parameter_types.links,
-                    'name': {'type': 'string'},
+                    'name': {'type': ['string', 'null']},
                     'metadata': {'^.+$': {'type': 'string'}}
                 },
                 'additionalProperties': False,
diff --git a/tempest/lib/api_schema/response/volume/groups.py b/tempest/lib/api_schema/response/volume/groups.py
index cb31269..f6e4bc2 100644
--- a/tempest/lib/api_schema/response/volume/groups.py
+++ b/tempest/lib/api_schema/response/volume/groups.py
@@ -64,7 +64,10 @@
                         'type': 'array',
                         'items': {'type': 'string', 'format': 'uuid'}
                     },
-                    'replication_status': {'type': 'string'}
+                    # TODO(zhufl): replication_status is added in 3.38, we
+                    # should move it to the 3.38 schema file when microversion
+                    # is supported in volume interfaces
+                    'replication_status': {'type': ['string', 'null']}
                 },
                 'additionalProperties': False,
                 'required': ['status', 'description', 'created_at',
@@ -129,6 +132,10 @@
                             'type': 'array',
                             'items': {'type': 'string', 'format': 'uuid'}
                         },
+                        # TODO(zhufl): replication_status is added in 3.38, we
+                        # should move it to the 3.38 schema file when
+                        # microversion is supported in volume interfaces
+                        'replication_status': {'type': ['string', 'null']}
                     },
                     'additionalProperties': False,
                     'required': ['status', 'description', 'created_at',
diff --git a/tempest/lib/api_schema/response/volume/manage_volume.py b/tempest/lib/api_schema/response/volume/manage_volume.py
new file mode 100644
index 0000000..d3acfd9
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/manage_volume.py
@@ -0,0 +1,27 @@
+# Copyright 2018 ZTE Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.api_schema.response.volume import volumes
+
+
+manage_volume = {
+    'status_code': [202],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'volume': volumes.common_show_volume},
+        'additionalProperties': False,
+        'required': ['volume']
+    }
+}
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 3fee489..8bdf98e 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -18,10 +18,9 @@
 import copy
 import datetime
 import re
+from urllib import parse as urlparse
 
 from oslo_log import log as logging
-import six
-from six.moves.urllib import parse as urlparse
 
 from tempest.lib import exceptions
 from tempest.lib.services.identity.v2 import token_client as json_v2id
@@ -65,8 +64,7 @@
     return url
 
 
-@six.add_metaclass(abc.ABCMeta)
-class AuthProvider(object):
+class AuthProvider(object, metaclass=abc.ABCMeta):
     """Provide authentication"""
 
     SCOPES = set(['project'])
@@ -391,7 +389,7 @@
         """
         if auth_data is None:
             auth_data = self.get_auth()
-        token, _auth_data = auth_data
+        _, _auth_data = auth_data
         service = filters.get('service')
         region = filters.get('region')
         name = filters.get('name')
@@ -428,7 +426,7 @@
 class KeystoneV3AuthProvider(KeystoneAuthProvider):
     """Provides authentication based on the Identity V3 API"""
 
-    SCOPES = set(['project', 'domain', 'unscoped', None])
+    SCOPES = set(['system', 'project', 'domain', 'unscoped', None])
 
     def _auth_client(self, auth_url):
         return json_v3id.V3TokenClient(
@@ -441,8 +439,8 @@
 
         Fields available in Credentials are passed to the token request,
         depending on the value of scope. Valid values for scope are: "project",
-        "domain". Any other string (e.g. "unscoped") or None will lead to an
-        unscoped token request.
+        "domain", or "system". Any other string (e.g. "unscoped") or None will
+        lead to an unscoped token request.
         """
 
         auth_params = dict(
@@ -465,12 +463,16 @@
                 domain_id=self.credentials.domain_id,
                 domain_name=self.credentials.domain_name)
 
+        if self.scope == 'system':
+            auth_params.update(system='all')
+
         return auth_params
 
     def _fill_credentials(self, auth_data_body):
-        # project or domain, depending on the scope
+        # project, domain, or system depending on the scope
         project = auth_data_body.get('project', None)
         domain = auth_data_body.get('domain', None)
+        system = auth_data_body.get('system', None)
         # user is always there
         user = auth_data_body['user']
         # Set project fields
@@ -490,6 +492,9 @@
                 self.credentials.domain_id = domain['id']
             if self.credentials.domain_name is None:
                 self.credentials.domain_name = domain['name']
+        # Set system scope
+        if system is not None:
+            self.credentials.system = 'all'
         # Set user fields
         if self.credentials.username is None:
             self.credentials.username = user['name']
@@ -524,7 +529,7 @@
         """
         if auth_data is None:
             auth_data = self.get_auth()
-        token, _auth_data = auth_data
+        _, _auth_data = auth_data
         service = filters.get('service')
         region = filters.get('region')
         name = filters.get('name')
@@ -677,7 +682,8 @@
                 raise exceptions.InvalidCredentials(msg)
         for key in attr:
             if key in self.ATTRIBUTES:
-                setattr(self, key, attr[key])
+                if attr[key] is not None:
+                    setattr(self, key, attr[key])
             else:
                 msg = '%s is not a valid attr for %s' % (key, self.__class__)
                 raise exceptions.InvalidCredentials(msg)
@@ -779,7 +785,7 @@
     ATTRIBUTES = ['domain_id', 'domain_name', 'password', 'username',
                   'project_domain_id', 'project_domain_name', 'project_id',
                   'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
-                  'user_domain_name', 'user_id']
+                  'user_domain_name', 'user_id', 'system']
     COLLISIONS = [('project_name', 'tenant_name'), ('project_id', 'tenant_id')]
 
     def __setattr__(self, key, value):
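
For reference on the system scope plumbing above: with the Identity v3 API, a system-scoped token is requested by putting "system": {"all": true} into the scope of the token request, which is roughly what the system='all' auth parameter above ends up asking Keystone for. A hand-written sketch of such a request body (the user, domain and password values are placeholders):

# Illustrative Identity v3 token request asking for a system-scoped token.
import json

system_scoped_token_request = {
    'auth': {
        'identity': {
            'methods': ['password'],
            'password': {
                'user': {
                    'name': 'example-admin',
                    'domain': {'id': 'default'},
                    'password': 'example-secret',
                },
            },
        },
        # This is the scope that corresponds to scope == 'system' above.
        'scope': {'system': {'all': True}},
    },
}

print(json.dumps(system_scoped_token_request, indent=2))
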
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index d8c776b..c661d21 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -18,7 +18,6 @@
 import subprocess
 
 from oslo_log import log as logging
-import six
 
 from tempest.lib import base
 import tempest.lib.cli.output_parser
@@ -55,8 +54,6 @@
                     flags, action, params])
     cmd = cmd.strip()
     LOG.info("running: '%s'", cmd)
-    if six.PY2:
-        cmd = cmd.encode('utf-8')
     cmd = shlex.split(cmd)
     stdout = subprocess.PIPE
     stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
@@ -67,10 +64,7 @@
                                        cmd,
                                        result,
                                        result_err)
-    if six.PY2:
-        return result
-    else:
-        return os.fsdecode(result)
+    return os.fsdecode(result)
 
 
 class CLIClient(object):
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 71ecb32..0ae11ca 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -16,19 +16,20 @@
 
 import argparse
 import ast
+import contextlib
 import importlib
 import inspect
 import os
 import sys
 import unittest
+import urllib.parse as urlparse
 import uuid
 
 from oslo_utils import uuidutils
-import six.moves.urllib.parse as urlparse
 
 DECORATOR_MODULE = 'decorators'
 DECORATOR_NAME = 'idempotent_id'
-DECORATOR_IMPORT = 'tempest.%s' % DECORATOR_MODULE
+DECORATOR_IMPORT = 'tempest.lib.%s' % DECORATOR_MODULE
 IMPORT_LINE = 'from tempest.lib import %s' % DECORATOR_MODULE
 DECORATOR_TEMPLATE = "@%s.%s('%%s')" % (DECORATOR_MODULE,
                                         DECORATOR_NAME)
@@ -180,34 +181,124 @@
         elif isinstance(node, ast.ImportFrom):
             return '%s.%s' % (node.module, node.names[0].name)
 
+    @contextlib.contextmanager
+    def ignore_site_packages_paths(self):
+        """Removes site-packages directories from the sys.path
+
+        Source:
+            - StackOverflow: https://stackoverflow.com/questions/22195382/
+            - Author: https://stackoverflow.com/users/485844/
+        """
+
+        paths = sys.path
+        # remove all third-party paths
+        # so that only stdlib imports will succeed
+        sys.path = list(filter(
+            None,
+            filter(lambda i: 'site-packages' not in i, sys.path)
+        ))
+        yield
+        sys.path = paths
+
+    def is_std_lib(self, module):
+        """Checks whether the module is part of the stdlib or not
+
+        Source:
+            - StackOverflow: https://stackoverflow.com/questions/22195382/
+            - Author: https://stackoverflow.com/users/485844/
+        """
+
+        if module in sys.builtin_module_names:
+            return True
+
+        with self.ignore_site_packages_paths():
+            imported_module = sys.modules.pop(module, None)
+            try:
+                importlib.import_module(module)
+            except ImportError:
+                return False
+            else:
+                return True
+            finally:
+                if imported_module:
+                    sys.modules[module] = imported_module
+
     def _add_import_for_test_uuid(self, patcher, src_parsed, source_path):
-        with open(source_path) as f:
-            src_lines = f.read().split('\n')
-        line_no = 0
-        tempest_imports = [node for node in src_parsed.body
+        import_list = [node for node in src_parsed.body
+                       if isinstance(node, (ast.Import, ast.ImportFrom))]
+
+        if not import_list:
+            print("(WARNING) %s: The file is not valid as it does not contain "
+                  "any import line! Therefore the import needed by "
+                  "@decorators.idempotent_id is not added!" % source_path)
+            return
+
+        tempest_imports = [node for node in import_list
                            if self._import_name(node) and
                            'tempest.' in self._import_name(node)]
-        if not tempest_imports:
-            import_snippet = '\n'.join(('', IMPORT_LINE, ''))
-        else:
-            for node in tempest_imports:
-                if self._import_name(node) < DECORATOR_IMPORT:
-                    continue
-                else:
-                    line_no = node.lineno
-                    import_snippet = IMPORT_LINE
-                    break
+
+        for node in tempest_imports:
+            if self._import_name(node) < DECORATOR_IMPORT:
+                continue
             else:
-                line_no = tempest_imports[-1].lineno
-                while True:
-                    if (not src_lines[line_no - 1] or
-                            getattr(self._next_node(src_parsed.body,
-                                                    tempest_imports[-1]),
-                                    'lineno') == line_no or
-                            line_no == len(src_lines)):
-                        break
-                    line_no += 1
-                import_snippet = '\n'.join((IMPORT_LINE, ''))
+                line_no = node.lineno
+                break
+        else:
+            if tempest_imports:
+                line_no = tempest_imports[-1].lineno + 1
+
+        # Insert import line between existing tempest imports
+        if tempest_imports:
+            patcher.add_patch(source_path, IMPORT_LINE, line_no)
+            return
+
+        # Group space separated imports together
+        grouped_imports = {}
+        first_import_line = import_list[0].lineno
+        for idx, import_line in enumerate(import_list, first_import_line):
+            group_no = import_line.lineno - idx
+            group = grouped_imports.get(group_no, [])
+            group.append(import_line)
+            grouped_imports[group_no] = group
+
+        if len(grouped_imports) > 3:
+            print("(WARNING) %s: The file contains more than three import "
+                  "groups! This is not valid according to the PEP8 "
+                  "style guide." % source_path)
+
+        # Divide grouped_imports into groups based on the PEP8 style guide
+        pep8_groups = {}
+        package_name = self.package.__name__.split(".")[0]
+        for key in grouped_imports:
+            module = self._import_name(grouped_imports[key][0]).split(".")[0]
+            if module.startswith(package_name):
+                group = pep8_groups.get('3rd_group', [])
+                pep8_groups['3rd_group'] = group + grouped_imports[key]
+            elif self.is_std_lib(module):
+                group = pep8_groups.get('1st_group', [])
+                pep8_groups['1st_group'] = group + grouped_imports[key]
+            else:
+                group = pep8_groups.get('2nd_group', [])
+                pep8_groups['2nd_group'] = group + grouped_imports[key]
+
+        for node in pep8_groups.get('2nd_group', []):
+            if self._import_name(node) < DECORATOR_IMPORT:
+                continue
+            else:
+                line_no = node.lineno
+                import_snippet = IMPORT_LINE
+                break
+        else:
+            if pep8_groups.get('2nd_group', []):
+                line_no = pep8_groups['2nd_group'][-1].lineno + 1
+                import_snippet = IMPORT_LINE
+            elif pep8_groups.get('1st_group', []):
+                line_no = pep8_groups['1st_group'][-1].lineno + 1
+                import_snippet = '\n' + IMPORT_LINE
+            else:
+                line_no = pep8_groups['3rd_group'][0].lineno
+                import_snippet = IMPORT_LINE + '\n\n'
+
         patcher.add_patch(source_path, import_snippet, line_no)
 
     def get_tests(self):
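
The grouping loop above relies on consecutive import lines having line numbers that increase in step with their enumeration index, so lineno - idx is constant within one blank-line-separated group. A tiny standalone illustration of that invariant (the line numbers are made up):

# Imports on lines 1-2 form one group, imports on lines 4-5 another;
# subtracting the running index yields a stable key per group.
linenos = [1, 2, 4, 5]

grouped = {}
for idx, lineno in enumerate(linenos, linenos[0]):
    grouped.setdefault(lineno - idx, []).append(lineno)

assert list(grouped.values()) == [[1, 2], [4, 5]]
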
diff --git a/tempest/lib/cmd/skip_tracker.py b/tempest/lib/cmd/skip_tracker.py
index 87806b7..95376e3 100755
--- a/tempest/lib/cmd/skip_tracker.py
+++ b/tempest/lib/cmd/skip_tracker.py
@@ -31,10 +31,11 @@
 except ImportError:
     launchpad = None
 
-LPCACHEDIR = os.path.expanduser('~/.launchpadlib/cache')
+LPCACHEDIR = os.path.expanduser(os.path.join('~', '.launchpadlib', 'cache'))
 LOG = logging.getLogger(__name__)
 
-BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..'))
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                       '..', '..', '..'))
 TESTDIR = os.path.join(BASEDIR, 'tempest')
 
 
diff --git a/tempest/lib/common/api_version_utils.py b/tempest/lib/common/api_version_utils.py
index 80dbc1d..db5c8c3 100644
--- a/tempest/lib/common/api_version_utils.py
+++ b/tempest/lib/common/api_version_utils.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
 import testtools
 
 from tempest.lib.common import api_version_request
@@ -117,7 +116,7 @@
     :param response_header: Response header where microversion is
             expected to be present.
     """
-    if not isinstance(api_microversion, six.string_types):
+    if not isinstance(api_microversion, str):
         raise TypeError('api_microversion must be a string')
     api_microversion_header_name = api_microversion_header_name.lower()
     if (api_microversion_header_name not in response_header or
diff --git a/tempest/lib/common/cred_client.py b/tempest/lib/common/cred_client.py
index a81f53c..f13d6d0 100644
--- a/tempest/lib/common/cred_client.py
+++ b/tempest/lib/common/cred_client.py
@@ -13,7 +13,6 @@
 import abc
 
 from oslo_log import log as logging
-import six
 
 from tempest.lib import auth
 from tempest.lib import exceptions as lib_exc
@@ -22,8 +21,7 @@
 LOG = logging.getLogger(__name__)
 
 
-@six.add_metaclass(abc.ABCMeta)
-class CredsClient(object):
+class CredsClient(object, metaclass=abc.ABCMeta):
     """This class is a wrapper around the identity clients
 
      to provide a single interface for managing credentials in both v2 and v3
@@ -39,11 +37,15 @@
         self.projects_client = projects_client
         self.roles_client = roles_client
 
-    def create_user(self, username, password, project, email):
+    def create_user(self, username, password, project=None, email=None):
         params = {'name': username,
-                  'password': password,
-                  self.project_id_param: project['id'],
-                  'email': email}
+                  'password': password}
+        # with keystone v3, a default project is not required
+        if project:
+            params[self.project_id_param] = project['id']
+        # email is not a first-class attribute of a user
+        if email:
+            params['email'] = email
         user = self.users_client.create_user(**params)
         if 'user' in user:
             user = user['user']
@@ -83,12 +85,15 @@
                       role['id'], project['id'], user['id'])
 
     @abc.abstractmethod
-    def get_credentials(self, user, project, password):
+    def get_credentials(
+            self, user, project, password, domain=None, system=None):
         """Produces a Credentials object from the details provided
 
         :param user: a user dict
-        :param project: a project dict
+        :param project: a project dict or None if using domain or system scope
         :param password: the password as a string
+        :param domain: a domain dict
+        :param system: a system dict
         :return: a Credentials object with all the available credential details
         """
         pass
@@ -116,7 +121,8 @@
     def delete_project(self, project_id):
         self.projects_client.delete_tenant(project_id)
 
-    def get_credentials(self, user, project, password):
+    def get_credentials(
+        self, user, project, password, domain=None, system=None):
         # User and project already include both ID and name here,
         # so there's no need to use the fill_in mode
         return auth.get_credentials(
@@ -156,23 +162,62 @@
     def delete_project(self, project_id):
         self.projects_client.delete_project(project_id)
 
-    def get_credentials(self, user, project, password):
+    def create_domain(self, name, description):
+        domain = self.domains_client.create_domain(
+            name=name, description=description)['domain']
+        return domain
+
+    def delete_domain(self, domain_id):
+        self.domains_client.update_domain(domain_id, enabled=False)
+        self.domains_client.delete_domain(domain_id)
+
+    def create_user(self, username, password, project=None, email=None,
+                    domain_id=None):
+        params = {'name': username,
+                  'password': password,
+                  'domain_id': domain_id or self.creds_domain['id']}
+        # with keystone v3, a default project is not required
+        if project:
+            params[self.project_id_param] = project['id']
+        # email is not a first-class attribute of a user
+        if email:
+            params['email'] = email
+        user = self.users_client.create_user(**params)
+        if 'user' in user:
+            user = user['user']
+        return user
+
+    def get_credentials(
+            self, user, project, password, domain=None, system=None):
         # User, project and domain already include both ID and name here,
         # so there's no need to use the fill_in mode.
         # NOTE(andreaf) We need to set all fields in the returned credentials.
         # Scope is then used to pick only those relevant for the type of
         # token needed by each service client.
+        if project:
+            project_name = project['name']
+            project_id = project['id']
+        else:
+            project_name = None
+            project_id = None
+        if domain:
+            domain_name = domain['name']
+            domain_id = domain['id']
+        else:
+            domain_name = self.creds_domain['name']
+            domain_id = self.creds_domain['id']
         return auth.get_credentials(
             auth_url=None,
             fill_in=False,
             identity_version='v3',
             username=user['name'], user_id=user['id'],
-            project_name=project['name'], project_id=project['id'],
+            project_name=project_name, project_id=project_id,
             password=password,
             project_domain_id=self.creds_domain['id'],
             project_domain_name=self.creds_domain['name'],
-            domain_id=self.creds_domain['id'],
-            domain_name=self.creds_domain['name'])
+            domain_id=domain_id,
+            domain_name=domain_name,
+            system=system)
 
     def assign_user_role_on_domain(self, user, role_name, domain=None):
         """Assign the specified role on a domain
@@ -197,6 +242,23 @@
             LOG.debug("Role %s already assigned on domain %s for user %s",
                       role['id'], domain['id'], user['id'])
 
+    def assign_user_role_on_system(self, user, role_name):
+        """Assign the specified role on the system
+
+        :param user: a user dict
+        :param role_name: name of the role to be assigned
+        """
+        role = self._check_role_exists(role_name)
+        if not role:
+            msg = 'No "%s" role found' % role_name
+            raise lib_exc.NotFound(msg)
+        try:
+            self.roles_client.create_user_role_on_system(
+                user['id'], role['id'])
+        except lib_exc.Conflict:
+            LOG.debug("Role %s already assigned on the system for user %s",
+                      role['id'], user['id'])
+
 
 def get_creds_client(identity_client,
                      projects_client,
diff --git a/tempest/lib/common/cred_provider.py b/tempest/lib/common/cred_provider.py
index 42ed41b..069172a 100644
--- a/tempest/lib/common/cred_provider.py
+++ b/tempest/lib/common/cred_provider.py
@@ -14,14 +14,12 @@
 
 import abc
 
-import six
 
 from tempest.lib import auth
 from tempest.lib import exceptions
 
 
-@six.add_metaclass(abc.ABCMeta)
-class CredentialProvider(object):
+class CredentialProvider(object, metaclass=abc.ABCMeta):
     def __init__(self, identity_version, name=None,
                  network_resources=None, credentials_domain=None,
                  admin_role=None, identity_uri=None):
@@ -60,6 +58,54 @@
         return
 
     @abc.abstractmethod
+    def get_system_admin_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_system_member_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_system_reader_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_domain_admin_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_domain_member_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_domain_reader_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_admin_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_alt_admin_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_member_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_alt_member_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_reader_creds(self):
+        return
+
+    @abc.abstractmethod
+    def get_project_alt_reader_creds(self):
+        return
+
+    @abc.abstractmethod
     def clear_creds(self):
         return
 
@@ -72,7 +118,7 @@
         return
 
     @abc.abstractmethod
-    def get_creds_by_roles(self, roles, force_new=False):
+    def get_creds_by_roles(self, roles, force_new=False, scope=None):
         return
 
     @abc.abstractmethod
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 8b82391..d86522a 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -16,7 +16,6 @@
 
 import netaddr
 from oslo_log import log as logging
-import six
 
 from tempest.lib.common import cred_client
 from tempest.lib.common import cred_provider
@@ -142,7 +141,14 @@
         else:
             # We use a dedicated client manager for identity client in case we
             # need a different token scope for them.
-            scope = 'domain' if self.identity_admin_domain_scope else 'project'
+            if self.default_admin_creds.system:
+                scope = 'system'
+            elif (self.identity_admin_domain_scope and
+                  (self.default_admin_creds.domain_id or
+                   self.default_admin_creds.domain_name)):
+                scope = 'domain'
+            else:
+                scope = 'project'
             identity_os = clients.ServiceClients(self.default_admin_creds,
                                                  self.identity_uri,
                                                  scope=scope)
@@ -157,62 +163,101 @@
                     os.network.PortsClient(),
                     os.network.SecurityGroupsClient())
 
-    def _create_creds(self, admin=False, roles=None):
+    def _create_creds(self, admin=False, roles=None, scope='project'):
         """Create credentials with random name.
 
-        Creates project and user. When admin flag is True create user
-        with admin role. Assign user with additional roles (for example
-        _member_) and roles requested by caller.
+        Creates a user and role assignments on a project, domain, or system.
+        When the admin flag is True, the user is created with the admin role
+        on the resource. If roles are provided, those roles are assigned on
+        the resource. Otherwise, the user is assigned the 'member' role on
+        the resource.
 
         :param admin: Flag if to assign to the user admin role
         :type admin: bool
         :param roles: Roles to assign for the user
         :type roles: list
+        :param str scope: The scope for the role assignment, may be one of
+                          'project', 'domain', or 'system'.
         :return: Readonly Credentials with network resources
+        :raises: Exception if scope is invalid
         """
+        if not roles:
+            roles = []
         root = self.name
 
-        project_name = data_utils.rand_name(root, prefix=self.resource_prefix)
-        project_desc = project_name + "-desc"
-        project = self.creds_client.create_project(
-            name=project_name, description=project_desc)
+        cred_params = {
+            'project': None,
+            'domain': None,
+            'system': None
+        }
+        if scope == 'project':
+            project_name = data_utils.rand_name(
+                root, prefix=self.resource_prefix)
+            project_desc = project_name + '-desc'
+            project = self.creds_client.create_project(
+                name=project_name, description=project_desc)
 
-        # NOTE(andreaf) User and project can be distinguished from the context,
-        # having the same ID in both makes it easier to match them and debug.
-        username = project_name
-        user_password = data_utils.rand_password()
-        email = data_utils.rand_name(
-            root, prefix=self.resource_prefix) + "@example.com"
-        user = self.creds_client.create_user(
-            username, user_password, project, email)
-        role_assigned = False
+            # NOTE(andreaf) User and project can be distinguished from the
+            # context, having the same ID in both makes it easier to match them
+            # and debug.
+            username = project_name + '-project'
+            cred_params['project'] = project
+        elif scope == 'domain':
+            domain_name = data_utils.rand_name(
+                root, prefix=self.resource_prefix)
+            domain_desc = domain_name + '-desc'
+            domain = self.creds_client.create_domain(
+                name=domain_name, description=domain_desc)
+            username = domain_name + '-domain'
+            cred_params['domain'] = domain
+        elif scope == 'system':
+            prefix = data_utils.rand_name(root, prefix=self.resource_prefix)
+            username = prefix + '-system'
+            cred_params['system'] = 'all'
+        else:
+            raise lib_exc.InvalidScopeType(scope=scope)
         if admin:
-            self.creds_client.assign_user_role(user, project, self.admin_role)
-            role_assigned = True
+            username += '-admin'
+        elif roles and len(roles) == 1:
+            username += '-' + roles[0]
+        user_password = data_utils.rand_password()
+        cred_params['password'] = user_password
+        user = self.creds_client.create_user(
+            username, user_password)
+        cred_params['user'] = user
+        roles_to_assign = [r for r in roles]
+        if admin:
+            roles_to_assign.append(self.admin_role)
+            if scope == 'project':
+                self.creds_client.assign_user_role(
+                    user, project, self.identity_admin_role)
             if (self.identity_version == 'v3' and
                     self.identity_admin_domain_scope):
                 self.creds_client.assign_user_role_on_domain(
                     user, self.identity_admin_role)
         # Add roles specified in config file
-        for conf_role in self.extra_roles:
-            self.creds_client.assign_user_role(user, project, conf_role)
-            role_assigned = True
-        # Add roles requested by caller
-        if roles:
-            for role in roles:
-                self.creds_client.assign_user_role(user, project, role)
-                role_assigned = True
+        roles_to_assign.extend(self.extra_roles)
+        # If there are still no roles, default to 'member'
         # NOTE(mtreinish) For a user to have access to a project with v3 auth
         # it must be assigned a role on the project. So we need to ensure that
         # our newly created user has a role on the newly created project.
-        if self.identity_version == 'v3' and not role_assigned:
+        if not roles_to_assign and self.identity_version == 'v3':
+            roles_to_assign = ['member']
             try:
                 self.creds_client.create_user_role('member')
             except lib_exc.Conflict:
                 LOG.warning('member role already exists, ignoring conflict.')
-            self.creds_client.assign_user_role(user, project, 'member')
+        for role in roles_to_assign:
+            if scope == 'project':
+                self.creds_client.assign_user_role(user, project, role)
+            elif scope == 'domain':
+                self.creds_client.assign_user_role_on_domain(
+                    user, role, domain)
+            elif scope == 'system':
+                self.creds_client.assign_user_role_on_system(user, role)
+        LOG.info("Roles assigned to the user %s are: %s",
+                 user['id'], roles_to_assign)
 
-        creds = self.creds_client.get_credentials(user, project, user_password)
+        creds = self.creds_client.get_credentials(**cred_params)
         return cred_provider.TestResources(creds)
 
     def _create_network_resources(self, tenant_id):
@@ -296,7 +341,7 @@
                             tenant_id=tenant_id,
                             enable_dhcp=self.network_resources['dhcp'],
                             ip_version=(ipaddress.ip_network(
-                                six.text_type(subnet_cidr)).version))
+                                str(subnet_cidr)).version))
                 else:
                     resp_body = self.subnets_admin_client.\
                         create_subnet(network_id=network_id,
@@ -304,7 +349,7 @@
                                       name=subnet_name,
                                       tenant_id=tenant_id,
                                       ip_version=(ipaddress.ip_network(
-                                          six.text_type(subnet_cidr)).version))
+                                          str(subnet_cidr)).version))
                 break
             except lib_exc.BadRequest as e:
                 if 'overlaps with another subnet' not in str(e):
@@ -327,16 +372,38 @@
         self.routers_admin_client.add_router_interface(router_id,
                                                        subnet_id=subnet_id)
 
-    def get_credentials(self, credential_type):
-        if self._creds.get(str(credential_type)):
+    def get_credentials(self, credential_type, scope=None):
+        if not scope and self._creds.get(str(credential_type)):
             credentials = self._creds[str(credential_type)]
+        elif scope and (
+                self._creds.get("%s_%s" % (scope, str(credential_type)))):
+            credentials = self._creds["%s_%s" % (scope, str(credential_type))]
         else:
-            if credential_type in ['primary', 'alt', 'admin']:
+            LOG.debug("Creating new dynamic creds for scope: %s and "
+                      "credential_type: %s", scope, credential_type)
+            if scope:
+                if credential_type in [['admin'], ['alt_admin']]:
+                    credentials = self._create_creds(
+                        admin=True, scope=scope)
+                elif credential_type in [['alt_member'], ['alt_reader']]:
+                    cred_type = credential_type[0][4:]
+                    if isinstance(cred_type, str):
+                        cred_type = [cred_type]
+                    credentials = self._create_creds(
+                        roles=cred_type, scope=scope)
+                else:
+                    credentials = self._create_creds(
+                        roles=credential_type, scope=scope)
+            elif credential_type in ['primary', 'alt', 'admin']:
                 is_admin = (credential_type == 'admin')
                 credentials = self._create_creds(admin=is_admin)
             else:
                 credentials = self._create_creds(roles=credential_type)
-            self._creds[str(credential_type)] = credentials
+            if scope:
+                self._creds["%s_%s" %
+                            (scope, str(credential_type))] = credentials
+            else:
+                self._creds[str(credential_type)] = credentials
             # Maintained until tests are ported
             LOG.info("Acquired dynamic creds:\n"
                      " credentials: %s", credentials)
@@ -349,28 +416,73 @@
                          " credentials: %s", credentials)
         return credentials
 
+    # TODO(gmann): Remove this method in favor of get_project_member_creds()
+    # after the deprecation phase.
     def get_primary_creds(self):
         return self.get_credentials('primary')
 
+    # TODO(gmann): Remove this method in favor of get_project_admin_creds()
+    # after the deprecation phase.
     def get_admin_creds(self):
         return self.get_credentials('admin')
 
+    # TODO(gmann): Replace this method with more appropriate name.
+    # like get_project_alt_member_creds()
     def get_alt_creds(self):
         return self.get_credentials('alt')
 
-    def get_creds_by_roles(self, roles, force_new=False):
+    def get_system_admin_creds(self):
+        return self.get_credentials(['admin'], scope='system')
+
+    def get_system_member_creds(self):
+        return self.get_credentials(['member'], scope='system')
+
+    def get_system_reader_creds(self):
+        return self.get_credentials(['reader'], scope='system')
+
+    def get_domain_admin_creds(self):
+        return self.get_credentials(['admin'], scope='domain')
+
+    def get_domain_member_creds(self):
+        return self.get_credentials(['member'], scope='domain')
+
+    def get_domain_reader_creds(self):
+        return self.get_credentials(['reader'], scope='domain')
+
+    def get_project_admin_creds(self):
+        return self.get_credentials(['admin'], scope='project')
+
+    def get_project_alt_admin_creds(self):
+        return self.get_credentials(['alt_admin'], scope='project')
+
+    def get_project_member_creds(self):
+        return self.get_credentials(['member'], scope='project')
+
+    def get_project_alt_member_creds(self):
+        return self.get_credentials(['alt_member'], scope='project')
+
+    def get_project_reader_creds(self):
+        return self.get_credentials(['reader'], scope='project')
+
+    def get_project_alt_reader_creds(self):
+        return self.get_credentials(['alt_reader'], scope='project')
+
+    def get_creds_by_roles(self, roles, force_new=False, scope=None):
         roles = list(set(roles))
         # The roles list as a str will become the index as the dict key for
         # the created credentials set in the dynamic_creds dict.
-        exist_creds = self._creds.get(str(roles))
+        creds_name = str(roles)
+        if scope:
+            creds_name = "%s_%s" % (scope, str(roles))
+        exist_creds = self._creds.get(creds_name)
         # If force_new flag is True 2 cred sets with the same roles are needed
         # handle this by creating a separate index for old one to store it
         # separately for cleanup
         if exist_creds and force_new:
-            new_index = str(roles) + '-' + str(len(self._creds))
+            new_index = creds_name + '-' + str(len(self._creds))
             self._creds[new_index] = exist_creds
-            del self._creds[str(roles)]
-        return self.get_credentials(roles)
+            del self._creds[creds_name]
+        return self.get_credentials(roles, scope=scope)
 
     def _clear_isolated_router(self, router_id, router_name):
         client = self.routers_admin_client
@@ -443,7 +555,7 @@
         if not self._creds:
             return
         self._clear_isolated_net_resources()
-        for creds in six.itervalues(self._creds):
+        for creds in self._creds.values():
             try:
                 self.creds_client.delete_user(creds.user_id)
             except lib_exc.NotFound:
@@ -465,6 +577,16 @@
             except lib_exc.NotFound:
                 LOG.warning("tenant with name: %s not found for delete",
                             creds.tenant_name)
+
+            # if cred is domain scoped, delete ephemeral domain
+            # do not delete default domain
+            if (hasattr(creds, 'domain_id') and
+                    creds.domain_id != creds.project_domain_id):
+                try:
+                    self.creds_client.delete_domain(creds.domain_id)
+                except lib_exc.NotFound:
+                    LOG.warning("domain with name: %s not found for delete",
+                                creds.domain_name)
         self._creds = {}
 
     def is_multi_user(self):
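
The scoped credential sets above are cached under a scope-prefixed key. A tiny standalone sketch of that naming convention (the stored values are placeholders):

# Mirrors the "%s_%s" % (scope, credential_type) cache keys used by
# get_credentials() and get_creds_by_roles() above.
_creds = {}


def _cache_key(credential_type, scope=None):
    if scope:
        return '%s_%s' % (scope, str(credential_type))
    return str(credential_type)


_creds[_cache_key('primary')] = 'legacy project-scoped creds'
_creds[_cache_key(['admin'], scope='system')] = 'system-scoped admin creds'

assert 'primary' in _creds
assert "system_['admin']" in _creds
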
diff --git a/tempest/lib/common/http.py b/tempest/lib/common/http.py
index 8c1a802..33f871b 100644
--- a/tempest/lib/common/http.py
+++ b/tempest/lib/common/http.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
 import urllib3
 
 
@@ -89,7 +88,7 @@
                 for key, value in info.getheaders().items():
                     # We assume HTTP header name to be string, not random
                     # bytes, thus ensure we have string keys.
-                    self[six.u(key).lower()] = value
+                    self[str(key).lower()] = value
                 self.status = info.status
                 self['status'] = str(self.status)
                 self.reason = info.reason
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index bbf5e89..0ac757d 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -15,7 +15,6 @@
 import jsonschema
 from oslo_serialization import base64
 from oslo_utils import timeutils
-import six
 
 # JSON Schema validator and format checker used for JSON Schema validation
 JSONSCHEMA_VALIDATOR = jsonschema.Draft4Validator
@@ -43,7 +42,7 @@
 @jsonschema.FormatChecker.cls_checks('base64')
 def _validate_base64_format(instance):
     try:
-        if isinstance(instance, six.text_type):
+        if isinstance(instance, str):
             instance = instance.encode('utf-8')
         base64.decode_as_bytes(instance)
     except TypeError:
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index 1011504..6d948cf 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -12,12 +12,11 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import hashlib
 import os
 
 from oslo_concurrency import lockutils
 from oslo_log import log as logging
-import six
+from oslo_utils.secretutils import md5
 import yaml
 
 from tempest.lib import auth
@@ -104,15 +103,24 @@
         return hash_dict
 
     @classmethod
+    def _append_scoped_role(cls, scope, role, account_hash, hash_dict):
+        key = "%s_%s" % (scope, role)
+        hash_dict['scoped_roles'].setdefault(key, [])
+        hash_dict['scoped_roles'][key].append(account_hash)
+        return hash_dict
+
+    @classmethod
     def get_hash_dict(cls, accounts, admin_role,
                       object_storage_operator_role=None,
                       object_storage_reseller_admin_role=None):
-        hash_dict = {'roles': {}, 'creds': {}, 'networks': {}}
+        hash_dict = {'roles': {}, 'creds': {}, 'networks': {},
+                     'scoped_roles': {}}
 
         # Loop over the accounts read from the yaml file
         for account in accounts:
             roles = []
             types = []
+            scope = None
             resources = []
             if 'roles' in account:
                 roles = account.pop('roles')
@@ -120,15 +128,24 @@
                 types = account.pop('types')
             if 'resources' in account:
                 resources = account.pop('resources')
-            temp_hash = hashlib.md5()
+            if 'project_name' in account:
+                scope = 'project'
+            elif 'domain_name' in account:
+                scope = 'domain'
+            elif 'system' in account:
+                scope = 'system'
+            temp_hash = md5(usedforsecurity=False)
             account_for_hash = dict((k, v) for (k, v) in account.items()
                                     if k in cls.HASH_CRED_FIELDS)
-            temp_hash.update(six.text_type(account_for_hash).encode('utf-8'))
+            temp_hash.update(str(account_for_hash).encode('utf-8'))
             temp_hash_key = temp_hash.hexdigest()
             hash_dict['creds'][temp_hash_key] = account
             for role in roles:
                 hash_dict = cls._append_role(role, temp_hash_key,
                                              hash_dict)
+                if scope:
+                    hash_dict = cls._append_scoped_role(
+                        scope, role, temp_hash_key, hash_dict)
             # If types are set for the account append the matching role
             # subdict with the hash
             for type in types:
@@ -172,7 +189,7 @@
         return self.is_multi_user()
 
     def _create_hash_file(self, hash_string):
-        path = os.path.join(os.path.join(self.accounts_dir, hash_string))
+        path = os.path.join(self.accounts_dir, hash_string)
         if not os.path.isfile(path):
             with open(path, 'w') as fd:
                 fd.write(self.name)
@@ -194,25 +211,32 @@
             if res:
                 return _hash
             else:
-                path = os.path.join(os.path.join(self.accounts_dir,
-                                                 _hash))
+                path = os.path.join(self.accounts_dir, _hash)
                 with open(path, 'r') as fd:
                     names.append(fd.read())
         msg = ('Insufficient number of users provided. %s have allocated all '
                'the credentials for this allocation request' % ','.join(names))
         raise lib_exc.InvalidCredentials(msg)
 
-    def _get_match_hash_list(self, roles=None):
+    def _get_match_hash_list(self, roles=None, scope=None):
         hashes = []
         if roles:
             # Loop over all the creds for each role in the subdict and generate
             # a list of cred lists for each role
             for role in roles:
-                temp_hashes = self.hash_dict['roles'].get(role, None)
-                if not temp_hashes:
-                    raise lib_exc.InvalidCredentials(
-                        "No credentials with role: %s specified in the "
-                        "accounts ""file" % role)
+                if scope:
+                    key = "%s_%s" % (scope, role)
+                    temp_hashes = self.hash_dict['scoped_roles'].get(key)
+                    if not temp_hashes:
+                        raise lib_exc.InvalidCredentials(
+                            "No credentials matching role: %s, scope: %s "
+                            "specified in the accounts file" % (role, scope))
+                else:
+                    temp_hashes = self.hash_dict['roles'].get(role, None)
+                    if not temp_hashes:
+                        raise lib_exc.InvalidCredentials(
+                            "No credentials with role: %s specified in the "
+                            "accounts file" % role)
                 hashes.append(temp_hashes)
             # Take the list of lists and do a boolean and between each list to
             # find the creds which fall under all the specified roles
@@ -240,8 +264,8 @@
         temp_creds.pop('password')
         return temp_creds
 
-    def _get_creds(self, roles=None):
-        useable_hashes = self._get_match_hash_list(roles)
+    def _get_creds(self, roles=None, scope=None):
+        useable_hashes = self._get_match_hash_list(roles, scope)
         if not useable_hashes:
             msg = 'No users configured for type/roles %s' % roles
             raise lib_exc.InvalidCredentials(msg)
@@ -283,6 +307,8 @@
         self.remove_hash(_hash)
         LOG.info("%s returned allocated creds:\n%s", self.name, clean_creds)
 
+    # TODO(gmann): Remove this method in favor of get_project_member_creds()
+    # after the deprecation phase.
     def get_primary_creds(self):
         if self._creds.get('primary'):
             return self._creds.get('primary')
@@ -290,6 +316,8 @@
         self._creds['primary'] = net_creds
         return net_creds
 
+    # TODO(gmann): Rename this method to something more descriptive,
+    # e.g. get_project_alt_member_creds().
     def get_alt_creds(self):
         if self._creds.get('alt'):
             return self._creds.get('alt')
@@ -297,9 +325,84 @@
         self._creds['alt'] = net_creds
         return net_creds
 
-    def get_creds_by_roles(self, roles, force_new=False):
+    def get_system_admin_creds(self):
+        if self._creds.get('system_admin'):
+            return self._creds.get('system_admin')
+        system_admin = self._get_creds(['admin'], scope='system')
+        self._creds['system_admin'] = system_admin
+        return system_admin
+
+    def get_system_member_creds(self):
+        if self._creds.get('system_member'):
+            return self._creds.get('system_member')
+        system_member = self._get_creds(['member'], scope='system')
+        self._creds['system_member'] = system_member
+        return system_member
+
+    def get_system_reader_creds(self):
+        if self._creds.get('system_reader'):
+            return self._creds.get('system_reader')
+        system_reader = self._get_creds(['reader'], scope='system')
+        self._creds['system_reader'] = system_reader
+        return system_reader
+
+    def get_domain_admin_creds(self):
+        if self._creds.get('domain_admin'):
+            return self._creds.get('domain_admin')
+        domain_admin = self._get_creds(['admin'], scope='domain')
+        self._creds['domain_admin'] = domain_admin
+        return domain_admin
+
+    def get_domain_member_creds(self):
+        if self._creds.get('domain_member'):
+            return self._creds.get('domain_member')
+        domain_member = self._get_creds(['member'], scope='domain')
+        self._creds['domain_member'] = domain_member
+        return domain_member
+
+    def get_domain_reader_creds(self):
+        if self._creds.get('domain_reader'):
+            return self._creds.get('domain_reader')
+        domain_reader = self._get_creds(['reader'], scope='domain')
+        self._creds['domain_reader'] = domain_reader
+        return domain_reader
+
+    def get_project_admin_creds(self):
+        if self._creds.get('project_admin'):
+            return self._creds.get('project_admin')
+        project_admin = self._get_creds(['admin'], scope='project')
+        self._creds['project_admin'] = project_admin
+        return project_admin
+
+    def get_project_alt_admin_creds(self):
+        # TODO(gmann): Implement alt admin hash.
+        return
+
+    def get_project_member_creds(self):
+        if self._creds.get('project_member'):
+            return self._creds.get('project_member')
+        project_member = self._get_creds(['member'], scope='project')
+        self._creds['project_member'] = project_member
+        return project_member
+
+    def get_project_alt_member_creds(self):
+        # TODO(gmann): Implement alt member hash.
+        return
+
+    def get_project_reader_creds(self):
+        if self._creds.get('project_reader'):
+            return self._creds.get('project_reader')
+        project_reader = self._get_creds(['reader'], scope='project')
+        self._creds['project_reader'] = project_reader
+        return project_reader
+
+    def get_project_alt_reader_creds(self):
+        # TODO(gmann): Implement alt reader hash.
+        return
+
+    def get_creds_by_roles(self, roles, force_new=False, scope=None):
         roles = list(set(roles))
-        exist_creds = self._creds.get(six.text_type(roles).encode(
+        exist_creds = self._creds.get(str(roles).encode(
             'utf-8'), None)
         # The force kwarg is used to allocate an additional set of creds with
         # the same role list. The index used for the previously allocation
@@ -309,17 +412,19 @@
         elif exist_creds and force_new:
             # NOTE(andreaf) In py3.x encode returns bytes, and b'' is bytes
             # In py2.7 encode returns strings, and b'' is still string
-            new_index = six.text_type(roles).encode('utf-8') + b'-' + \
-                six.text_type(len(self._creds)).encode('utf-8')
+            new_index = str(roles).encode('utf-8') + b'-' + \
+                str(len(self._creds)).encode('utf-8')
             self._creds[new_index] = exist_creds
         net_creds = self._get_creds(roles=roles)
-        self._creds[six.text_type(roles).encode('utf-8')] = net_creds
+        self._creds[str(roles).encode('utf-8')] = net_creds
         return net_creds
 
     def clear_creds(self):
         for creds in self._creds.values():
             self.remove_credentials(creds)
 
+    # TODO(gmann): Remove this method in favor of get_project_admin_creds()
+    # after the deprecation phase.
     def get_admin_creds(self):
         return self.get_creds_by_roles([self.admin_role])
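
The scoped-role bookkeeping added above indexes account hashes under '<scope>_<role>' keys, and the new get_*_creds helpers look credentials up by role plus scope. A minimal sketch of how those buckets are populated, assuming the provider class in this module is PreProvisionedCredentialProvider; the account values are invented and only the key names mirror the fields get_hash_dict() inspects:

from tempest.lib.common import preprov_creds

# Two invented pre-provisioned accounts: a project-scoped member and a
# system-scoped admin.
accounts = [
    {'username': 'user-1', 'password': 'secret',
     'project_name': 'demo', 'roles': ['member']},
    {'username': 'admin-1', 'password': 'secret',
     'system': 'all', 'roles': ['admin']},
]

hash_dict = preprov_creds.PreProvisionedCredentialProvider.get_hash_dict(
    accounts, admin_role='admin')

# Besides the existing 'roles' buckets, the accounts are now also indexed
# by scope, e.g. under 'project_member' and 'system_admin'.
print(sorted(hash_dict['scoped_roles']))
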
 
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 0513e90..573d64e 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -18,13 +18,12 @@
 import email.utils
 import re
 import time
+import urllib
 
 import jsonschema
 from oslo_log import log as logging
 from oslo_log import versionutils
 from oslo_serialization import jsonutils as json
-import six
-from six.moves import urllib
 
 from tempest.lib.common import http
 from tempest.lib.common import jsonschema_validator
@@ -104,16 +103,18 @@
                                        'location', 'proxy-authenticate',
                                        'retry-after', 'server',
                                        'vary', 'www-authenticate'))
-        dscv = disable_ssl_certificate_validation
+        self.dscv = disable_ssl_certificate_validation
 
         if proxy_url:
             self.http_obj = http.ClosingProxyHttp(
                 proxy_url,
-                disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+                disable_ssl_certificate_validation=self.dscv,
+                ca_certs=ca_certs,
                 timeout=http_timeout, follow_redirects=follow_redirects)
         else:
             self.http_obj = http.ClosingHttp(
-                disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+                disable_ssl_certificate_validation=self.dscv,
+                ca_certs=ca_certs,
                 timeout=http_timeout, follow_redirects=follow_redirects)
 
     def get_headers(self, accept_type=None, send_type=None):
@@ -416,7 +417,7 @@
     def _safe_body(self, body, maxlen=4096):
         # convert a structure into a string safely
         try:
-            text = six.text_type(body)
+            text = str(body)
         except UnicodeDecodeError:
             # if this isn't actually text, return marker that
             return "<BinaryData: removed>"
@@ -507,7 +508,7 @@
             if not hasattr(body, "keys") or len(body.keys()) != 1:
                 return body
             # Just return the "wrapped" element
-            first_key, first_item = six.next(six.iteritems(body))
+            _, first_item = tuple(body.items())[0]
             if isinstance(first_item, (dict, list)):
                 return first_item
         except (ValueError, IndexError):
@@ -888,7 +889,7 @@
             return True
         return 'exceed' in resp_body.get('message', 'blabla')
 
-    def wait_for_resource_deletion(self, id):
+    def wait_for_resource_deletion(self, id, *args, **kwargs):
         """Waits for a resource to be deleted
 
         This method will loop over is_resource_deleted until either
@@ -901,13 +902,18 @@
         """
         start_time = int(time.time())
         while True:
-            if self.is_resource_deleted(id):
+            if self.is_resource_deleted(id, *args, **kwargs):
                 return
             if int(time.time()) - start_time >= self.build_timeout:
                 message = ('Failed to delete %(resource_type)s %(id)s within '
-                           'the required time (%(timeout)s s).' %
+                           'the required time (%(timeout)s s). Timer started '
+                           'at %(start_time)s. Timer ended at %(end_time)s. '
+                           'Waited for %(wait_time)s seconds.' %
                            {'resource_type': self.resource_type, 'id': id,
-                            'timeout': self.build_timeout})
+                            'timeout': self.build_timeout,
+                            'start_time': start_time,
+                            'end_time': int(time.time()),
+                            'wait_time': int(time.time()) - start_time})
                 caller = test_utils.find_test_caller()
                 if caller:
                     message = '(%s) %s' % (caller, message)
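
wait_for_resource_deletion() now forwards any extra positional and keyword arguments to is_resource_deleted(), which helps clients whose deletion check needs more context than the resource id. A hypothetical client sketch; the URL layout and the parent/child resources are invented:

from tempest.lib.common import rest_client
from tempest.lib import exceptions


class ChildResourceClient(rest_client.RestClient):
    resource_type = 'child resource'

    def is_resource_deleted(self, id, parent_id=None):
        # The extra 'parent_id' keyword arrives via
        # wait_for_resource_deletion(child_id, parent_id=...).
        try:
            self.get('parents/%s/children/%s' % (parent_id, id))
        except exceptions.NotFound:
            return True
        return False
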
diff --git a/tempest/lib/common/ssh.py b/tempest/lib/common/ssh.py
index 3a05f27..ee15375 100644
--- a/tempest/lib/common/ssh.py
+++ b/tempest/lib/common/ssh.py
@@ -14,13 +14,13 @@
 #    under the License.
 
 
+import io
 import select
 import socket
 import time
 import warnings
 
 from oslo_log import log as logging
-import six
 
 from tempest.lib import exceptions
 
@@ -65,9 +65,9 @@
         self.username = username
         self.port = port
         self.password = password
-        if isinstance(pkey, six.string_types):
+        if isinstance(pkey, str):
             pkey = paramiko.RSAKey.from_private_key(
-                six.StringIO(str(pkey)))
+                io.StringIO(str(pkey)))
         self.pkey = pkey
         self.look_for_keys = look_for_keys
         self.key_filename = key_filename
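
With six gone, a private key passed as a plain str is wrapped in io.StringIO and parsed into a paramiko key by the constructor. A hedged sketch; the host and username are placeholders and the throwaway key only stands in for real key material:

import io

import paramiko

from tempest.lib.common import ssh

# Generate a disposable RSA key and serialise it to PEM text, standing in
# for key material read from configuration.
key = paramiko.RSAKey.generate(2048)
buf = io.StringIO()
key.write_private_key(buf)
key_material = buf.getvalue()

# pkey may be passed as a str; the client converts it to a paramiko key.
client = ssh.Client('192.0.2.10', 'cirros', pkey=key_material)
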
diff --git a/tempest/lib/common/thread.py b/tempest/lib/common/thread.py
index b47d40d..ef0ec73 100644
--- a/tempest/lib/common/thread.py
+++ b/tempest/lib/common/thread.py
@@ -13,13 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
-
-if six.PY2:
-    # module thread is removed in Python 3
-    from thread import get_ident  # noqa: H237,F401
-
-else:
-    # On Python3 thread module has been deprecated and get_ident has been moved
-    # to threading module
-    from threading import get_ident  # noqa: F401
+# On Python3 thread module has been deprecated and get_ident has been moved
+# to threading module
+from threading import get_ident  # noqa: F401
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index 7f94612..1e94f86 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -19,7 +19,6 @@
 import uuid
 
 from oslo_utils import uuidutils
-import six.moves
 
 
 def rand_uuid():
@@ -129,7 +128,7 @@
     :rtype: string
     """
     guid = []
-    for i in range(8):
+    for _ in range(8):
         guid.append("%02x" % random.randint(0x00, 0xff))
     return ':'.join(guid)
 
@@ -169,7 +168,9 @@
     :return: size randomly bytes
     :rtype: string
     """
-    return b''.join([six.int2byte(random.randint(0, 255))
+    if size > 1 << 20:
+        raise RuntimeError('Size must not exceed 1 MiB')
+    return b''.join([bytes((random.randint(0, 255),))
                      for i in range(size)])
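
random_bytes() keeps its previous behaviour but now refuses to build payloads larger than 1 MiB in memory. A short usage sketch:

from tempest.lib.common.utils import data_utils

blob = data_utils.random_bytes(1024)        # 1 KiB payload, still allowed

try:
    data_utils.random_bytes((1 << 20) + 1)  # just over the cap
except RuntimeError:
    pass
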
 
 
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index 71fed02..d84dd28 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -15,7 +15,6 @@
 
 import netaddr
 from oslo_log import log as logging
-import six
 
 from tempest.lib.common import ssh
 from tempest.lib.common.utils import test_utils
@@ -55,8 +54,8 @@
                             except Exception:
                                 msg = 'Could not get console_log for server %s'
                                 LOG.debug(msg, self.server['id'])
-                    # re-raise the original ssh timeout exception
-                    six.reraise(*original_exception)
+                    # raise the original ssh timeout exception
+                    raise
                 finally:
                     # Delete the traceback to avoid circular references
                     _, _, trace = original_exception
diff --git a/tempest/lib/common/utils/test_utils.py b/tempest/lib/common/utils/test_utils.py
index 2a9f3a9..4cf8351 100644
--- a/tempest/lib/common/utils/test_utils.py
+++ b/tempest/lib/common/utils/test_utils.py
@@ -80,10 +80,19 @@
 
 def call_and_ignore_notfound_exc(func, *args, **kwargs):
     """Call the given function and pass if a `NotFound` exception is raised."""
-    try:
-        return func(*args, **kwargs)
-    except exceptions.NotFound:
-        pass
+    attempt = 0
+    while True:
+        attempt += 1
+        try:
+            return func(*args, **kwargs)
+        except exceptions.NotFound:
+            return
+        except exceptions.ServerFault:
+            # NOTE(danms): Tolerate a ServerFault up to two times while
+            # calling the function; on the third occurrence assume the
+            # fault is genuine and re-raise it.
+            if attempt >= 3:
+                raise
+            LOG.warning('Got ServerFault while running %s, retrying...', func)
 
 
 def call_until_true(func, duration, sleep_for, *args, **kwargs):
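
call_and_ignore_notfound_exc() now also retries a couple of ServerFault errors before giving up. A small usage sketch; the compute client and server id are placeholders:

from tempest.lib.common.utils import test_utils


def cleanup_server(servers_client, server_id):
    # NotFound is swallowed (the server is already gone); a ServerFault is
    # retried twice and re-raised on its third occurrence.
    test_utils.call_and_ignore_notfound_exc(
        servers_client.delete_server, server_id)
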
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index 808e0fb..a4633ca 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -16,7 +16,6 @@
 import uuid
 
 from oslo_log import log as logging
-import six
 import testtools
 
 from tempest.lib import exceptions as lib_exc
@@ -72,19 +71,13 @@
     def decorator(f):
         @functools.wraps(f)
         def wrapper(*func_args, **func_kwargs):
-            skip = False
-            msg = ''
-            if "condition" in kwargs:
-                if kwargs["condition"] is True:
-                    skip = True
-            else:
-                skip = True
-            if "bug" in kwargs and skip is True:
-                bug = kwargs['bug']
+            condition = kwargs.get('condition', True)
+            bug = kwargs.get('bug', None)
+            if bug and condition:
                 bug_type = kwargs.get('bug_type', 'launchpad')
                 bug_url = _get_bug_url(bug, bug_type)
-                msg = "Skipped until bug: %s is resolved." % bug_url
-                raise testtools.TestCase.skipException(msg)
+                raise testtools.TestCase.skipException(
+                    "Skipped until bug: %s is resolved." % bug_url)
             return f(*func_args, **func_kwargs)
         return wrapper
     return decorator
@@ -116,7 +109,7 @@
 
 def idempotent_id(id):
     """Stub for metadata decorator"""
-    if not isinstance(id, six.string_types):
+    if not isinstance(id, str):
         raise TypeError('Test idempotent_id must be string not %s'
                         '' % type(id).__name__)
     uuid.UUID(id)
@@ -124,7 +117,7 @@
     def decorator(f):
         f = testtools.testcase.attr('id-%s' % id)(f)
         if f.__doc__:
-            f.__doc__ = 'Test idempotent id: %s\n%s' % (id, f.__doc__)
+            f.__doc__ = 'Test idempotent id: %s\n\n%s' % (id, f.__doc__)
         else:
             f.__doc__ = 'Test idempotent id: %s' % id
         return f
@@ -146,7 +139,7 @@
         # Check to see if the attr should be conditional applied.
         if 'condition' in kwargs and not kwargs.get('condition'):
             return f
-        if 'type' in kwargs and isinstance(kwargs['type'], six.string_types):
+        if 'type' in kwargs and isinstance(kwargs['type'], str):
             f = testtools.testcase.attr(kwargs['type'])(f)
         elif 'type' in kwargs and isinstance(kwargs['type'], list):
             for attr in kwargs['type']:
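
After the rewrite above, the skip decorator skips only when a bug is supplied and the optional condition (default True) holds. A usage sketch of skip_because with a placeholder bug number:

import testtools

from tempest.lib import decorators


class SkipExamples(testtools.TestCase):

    @decorators.skip_because(bug='1234567')  # placeholder bug number
    def test_always_skipped(self):
        pass

    @decorators.skip_because(bug='1234567', condition=False)
    def test_runs_normally(self):
        pass
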
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index 84b7ee6..abe68d2 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -294,3 +294,7 @@
 class ConsistencyGroupSnapshotException(TempestException):
     message = ("Consistency group snapshot %(cgsnapshot_id)s failed and is "
                "in ERROR status")
+
+
+class InvalidScopeType(TempestException):
+    message = "Invalid scope %(scope)s"
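
Like the other TempestException subclasses, the new exception interpolates its message from keyword arguments. A small sketch; the set of accepted scopes is an assumption based on the rest of this change:

from tempest.lib import exceptions


def validate_scope(scope):
    # 'project', 'domain' and 'system' mirror the scopes used elsewhere in
    # this change; anything else is rejected.
    if scope not in ('project', 'domain', 'system'):
        raise exceptions.InvalidScopeType(scope=scope)
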
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index 90debd9..8b5c758 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -52,7 +52,6 @@
         'image.v2': image.v2,
         'network': network,
         'object-storage': object_storage,
-        'volume.v1': volume.v1,
         'volume.v2': volume.v2,
         'volume.v3': volume.v3
     }
@@ -257,7 +256,7 @@
     # class should only be used by tests hosted in Tempest.
 
     @removals.removed_kwarg('client_parameters')
-    def __init__(self, credentials, identity_uri, region=None, scope='project',
+    def __init__(self, credentials, identity_uri, region=None, scope=None,
                  disable_ssl_certificate_validation=True, ca_certs=None,
                  trace_requests='', client_parameters=None, proxy_url=None):
         """Service Clients provider
@@ -348,6 +347,14 @@
         self.ca_certs = ca_certs
         self.trace_requests = trace_requests
         self.proxy_url = proxy_url
+        if self.credentials.project_id or self.credentials.project_name:
+            scope = 'project'
+        elif self.credentials.system:
+            scope = 'system'
+        elif self.credentials.domain_id or self.credentials.domain_name:
+            scope = 'domain'
+        else:
+            scope = 'project'
         # Creates an auth provider for the credentials
         self.auth_provider = auth_provider_class(
             self.credentials, self.identity_uri, scope=scope,
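
With the block above, callers no longer have to pass scope explicitly; it is derived from whichever of project, system or domain the credentials carry. A hedged sketch, assuming KeystoneV3Credentials and a placeholder identity endpoint:

from tempest.lib import auth
from tempest.lib.services import clients

creds = auth.KeystoneV3Credentials(
    username='admin', password='secret',
    domain_name='example-domain', user_domain_name='example-domain')

# No scope argument: the constructor sees domain_name (and neither a
# project nor system scope), so a domain-scoped auth provider is built.
service_clients = clients.ServiceClients(
    creds, identity_uri='https://keystone.example.test/identity/v3')
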
diff --git a/tempest/lib/services/compute/__init__.py b/tempest/lib/services/compute/__init__.py
index 91e896a..8d07a45 100644
--- a/tempest/lib/services/compute/__init__.py
+++ b/tempest/lib/services/compute/__init__.py
@@ -14,6 +14,8 @@
 
 from tempest.lib.services.compute.agents_client import AgentsClient
 from tempest.lib.services.compute.aggregates_client import AggregatesClient
+from tempest.lib.services.compute.assisted_volume_snapshots_client import \
+    AssistedVolumeSnapshotsClient
 from tempest.lib.services.compute.availability_zone_client import \
     AvailabilityZoneClient
 from tempest.lib.services.compute.baremetal_nodes_client import \
@@ -63,9 +65,10 @@
 from tempest.lib.services.compute.volumes_client import \
     VolumesClient
 
-__all__ = ['AgentsClient', 'AggregatesClient', 'AvailabilityZoneClient',
-           'BaremetalNodesClient', 'CertificatesClient', 'ExtensionsClient',
-           'FixedIPsClient', 'FlavorsClient', 'FloatingIPPoolsClient',
+__all__ = ['AgentsClient', 'AggregatesClient', 'AssistedVolumeSnapshotsClient',
+           'AvailabilityZoneClient', 'BaremetalNodesClient',
+           'CertificatesClient', 'ExtensionsClient', 'FixedIPsClient',
+           'FlavorsClient', 'FloatingIPPoolsClient',
            'FloatingIPsBulkClient', 'FloatingIPsClient', 'HostsClient',
            'HypervisorClient', 'ImagesClient', 'InstanceUsagesAuditLogClient',
            'InterfacesClient', 'KeyPairsClient', 'LimitsClient',
diff --git a/tempest/lib/services/compute/agents_client.py b/tempest/lib/services/compute/agents_client.py
index 12b3900..bd973dd 100644
--- a/tempest/lib/services/compute/agents_client.py
+++ b/tempest/lib/services/compute/agents_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import agents as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/assisted_volume_snapshots_client.py b/tempest/lib/services/compute/assisted_volume_snapshots_client.py
new file mode 100644
index 0000000..7a949df
--- /dev/null
+++ b/tempest/lib/services/compute/assisted_volume_snapshots_client.py
@@ -0,0 +1,64 @@
+# Copyright 2017 AT&T Corp
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.compute import base_compute_client
+
+
+class AssistedVolumeSnapshotsClient(base_compute_client.BaseComputeClient):
+    """Service client for assisted volume snapshots"""
+
+    def delete_assisted_volume_snapshot(self, volume_id, snapshot_id):
+        """Delete snapshot for the given volume id.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/compute/#delete-assisted-volume-snapshot
+
+        :param volume_id: UUID of the volume
+        :param snapshot_id: The UUID of the snapshot
+        """
+        query_param = {'delete_info': json.dumps({'volume_id': volume_id})}
+        resp, body = self.delete("os-assisted-volume-snapshots/%s?%s"
+                                 % (snapshot_id,
+                                    urllib.urlencode(query_param)))
+        return rest_client.ResponseBody(resp, body)
+
+    def create_assisted_volume_snapshot(self, volume_id, snapshot_id,
+                                        **kwargs):
+        """Create a new assisted volume snapshot.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/compute/#create-assisted-volume-snapshots
+
+        :param volume_id: the UUID of the source volume
+        :param snapshot_id: the UUID of the snapshot
+        :param type: the type of the snapshot, such as qcow2
+        :param new_file: the name of the image file that will be created
+        """
+        url = "os-assisted-volume-snapshots"
+        info = {"snapshot_id": snapshot_id}
+        if kwargs:
+            info.update(kwargs)
+        body = {"snapshot": {"volume_id": volume_id, "create_info": info}}
+        post_body = json.dumps(body)
+        resp, body = self.post(url, post_body)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
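
A usage sketch for the new client; the client instance, ids and the qcow2 file name are placeholders:

def exercise_assisted_snapshot(client, volume_id, snapshot_id):
    # 'client' is an AssistedVolumeSnapshotsClient instance.
    client.create_assisted_volume_snapshot(
        volume_id, snapshot_id, type='qcow2',
        new_file='volume-%s.qcow2' % snapshot_id)
    client.delete_assisted_volume_snapshot(volume_id, snapshot_id)
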
diff --git a/tempest/lib/services/compute/baremetal_nodes_client.py b/tempest/lib/services/compute/baremetal_nodes_client.py
index 3efdbce..83af451 100644
--- a/tempest/lib/services/compute/baremetal_nodes_client.py
+++ b/tempest/lib/services/compute/baremetal_nodes_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import baremetal_nodes \
     as schema
diff --git a/tempest/lib/services/compute/flavors_client.py b/tempest/lib/services/compute/flavors_client.py
index e22b5b2..5282405 100644
--- a/tempest/lib/services/compute/flavors_client.py
+++ b/tempest/lib/services/compute/flavors_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import flavors as schema
 from tempest.lib.api_schema.response.compute.v2_1 import flavors_access \
diff --git a/tempest/lib/services/compute/floating_ip_pools_client.py b/tempest/lib/services/compute/floating_ip_pools_client.py
index d3af050..aa065b8 100644
--- a/tempest/lib/services/compute/floating_ip_pools_client.py
+++ b/tempest/lib/services/compute/floating_ip_pools_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import floating_ips as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/floating_ips_client.py b/tempest/lib/services/compute/floating_ips_client.py
index d7a1a9b..e6b6916 100644
--- a/tempest/lib/services/compute/floating_ips_client.py
+++ b/tempest/lib/services/compute/floating_ips_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import floating_ips as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/hosts_client.py b/tempest/lib/services/compute/hosts_client.py
index 743b4ec..bbecc3b 100644
--- a/tempest/lib/services/compute/hosts_client.py
+++ b/tempest/lib/services/compute/hosts_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import hosts as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/images_client.py b/tempest/lib/services/compute/images_client.py
index b252ee9..b6d8d30 100644
--- a/tempest/lib/services/compute/images_client.py
+++ b/tempest/lib/services/compute/images_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import images as schema
 from tempest.lib.api_schema.response.compute.v2_45 import images as schemav245
diff --git a/tempest/lib/services/compute/interfaces_client.py b/tempest/lib/services/compute/interfaces_client.py
index e1c02fa..9244a4a 100644
--- a/tempest/lib/services/compute/interfaces_client.py
+++ b/tempest/lib/services/compute/interfaces_client.py
@@ -16,15 +16,22 @@
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.compute.v2_1 import interfaces as schema
+from tempest.lib.api_schema.response.compute.v2_70 import interfaces as \
+    schemav270
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
 
 class InterfacesClient(base_compute_client.BaseComputeClient):
 
+    schema_versions_info = [
+        {'min': None, 'max': '2.69', 'schema': schema},
+        {'min': '2.70', 'max': None, 'schema': schemav270}]
+
     def list_interfaces(self, server_id):
         resp, body = self.get('servers/%s/os-interface' % server_id)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.list_interfaces, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -40,6 +47,7 @@
         resp, body = self.post('servers/%s/os-interface' % server_id,
                                body=post_body)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.get_create_interfaces, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -47,6 +55,7 @@
         resp, body = self.get('servers/%s/os-interface/%s' % (server_id,
                                                               port_id))
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.get_create_interfaces, resp, body)
         return rest_client.ResponseBody(resp, body)
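
list_interfaces/show/create above now pick their response schema from schema_versions_info according to the compute microversion in use. An illustrative stand-in (not the real helper in base_compute_client) for what the min/max matching amounts to:

def pick_schema(versions_info, microversion):
    # Return the schema of the first entry whose [min, max] range contains
    # the microversion; None bounds are open-ended.
    def as_tuple(version):
        return tuple(int(part) for part in version.split('.'))

    for entry in versions_info:
        low_ok = (entry['min'] is None or
                  as_tuple(entry['min']) <= as_tuple(microversion))
        high_ok = (entry['max'] is None or
                   as_tuple(microversion) <= as_tuple(entry['max']))
        if low_ok and high_ok:
            return entry['schema']
    raise ValueError('No schema matches microversion %s' % microversion)

# pick_schema(InterfacesClient.schema_versions_info, '2.70') returns the
# v2_70 interfaces schema module.
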
 
diff --git a/tempest/lib/services/compute/keypairs_client.py b/tempest/lib/services/compute/keypairs_client.py
index 47cf2d0..9d7b7fc 100644
--- a/tempest/lib/services/compute/keypairs_client.py
+++ b/tempest/lib/services/compute/keypairs_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import keypairs as schemav21
 from tempest.lib.api_schema.response.compute.v2_2 import keypairs as schemav22
diff --git a/tempest/lib/services/compute/migrations_client.py b/tempest/lib/services/compute/migrations_client.py
index 812dc96..8a6e62a 100644
--- a/tempest/lib/services/compute/migrations_client.py
+++ b/tempest/lib/services/compute/migrations_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import migrations as schema
 from tempest.lib.api_schema.response.compute.v2_23 import migrations \
diff --git a/tempest/lib/services/compute/quotas_client.py b/tempest/lib/services/compute/quotas_client.py
index 12e865e..dd796aa 100644
--- a/tempest/lib/services/compute/quotas_client.py
+++ b/tempest/lib/services/compute/quotas_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import quotas as schema
 from tempest.lib.api_schema.response.compute.v2_36 import quotas as schemav236
diff --git a/tempest/lib/services/compute/security_groups_client.py b/tempest/lib/services/compute/security_groups_client.py
index 9493144..0bba990 100644
--- a/tempest/lib/services/compute/security_groups_client.py
+++ b/tempest/lib/services/compute/security_groups_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import \
     security_groups as schema
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 6723516..e58890c 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -16,9 +16,9 @@
 #    under the License.
 
 import copy
+from urllib import parse as urllib
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import \
     security_groups as security_groups_schema
@@ -36,6 +36,7 @@
 from tempest.lib.api_schema.response.compute.v2_70 import servers as schemav270
 from tempest.lib.api_schema.response.compute.v2_71 import servers as schemav271
 from tempest.lib.api_schema.response.compute.v2_73 import servers as schemav273
+from tempest.lib.api_schema.response.compute.v2_79 import servers as schemav279
 from tempest.lib.api_schema.response.compute.v2_8 import servers as schemav28
 from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
 from tempest.lib.common import rest_client
@@ -61,7 +62,8 @@
         {'min': '2.63', 'max': '2.69', 'schema': schemav263},
         {'min': '2.70', 'max': '2.70', 'schema': schemav270},
         {'min': '2.71', 'max': '2.72', 'schema': schemav271},
-        {'min': '2.73', 'max': None, 'schema': schemav273}]
+        {'min': '2.73', 'max': '2.78', 'schema': schemav273},
+        {'min': '2.79', 'max': None, 'schema': schemav279}]
 
     def __init__(self, auth_provider, service, region,
                  enable_instance_password=True, **kwargs):
@@ -646,7 +648,7 @@
 
         For a full list of available parameters, please refer to the official
         API reference:
-        https://docs.openstack.org/api-ref/compute/#create-remote-console
+        https://docs.openstack.org/api-ref/compute/#create-console
         """
         param = {
             'remote_console': {
diff --git a/tempest/lib/services/compute/services_client.py b/tempest/lib/services/compute/services_client.py
index 4e3383f..7d9f3e2 100644
--- a/tempest/lib/services/compute/services_client.py
+++ b/tempest/lib/services/compute/services_client.py
@@ -14,8 +14,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import services as schema
 from tempest.lib.api_schema.response.compute.v2_11 import services \
diff --git a/tempest/lib/services/compute/snapshots_client.py b/tempest/lib/services/compute/snapshots_client.py
index 225eb8d..2e6f7cf 100644
--- a/tempest/lib/services/compute/snapshots_client.py
+++ b/tempest/lib/services/compute/snapshots_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import snapshots as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/tenant_usages_client.py b/tempest/lib/services/compute/tenant_usages_client.py
index a34730c..b47d917 100644
--- a/tempest/lib/services/compute/tenant_usages_client.py
+++ b/tempest/lib/services/compute/tenant_usages_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import tenant_usages
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/compute/volumes_client.py b/tempest/lib/services/compute/volumes_client.py
index 11282ee..52172ed 100644
--- a/tempest/lib/services/compute/volumes_client.py
+++ b/tempest/lib/services/compute/volumes_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.compute.v2_1 import volumes as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/identity/v2/identity_client.py b/tempest/lib/services/identity/v2/identity_client.py
index d7526f3..6239ba6 100644
--- a/tempest/lib/services/identity/v2/identity_client.py
+++ b/tempest/lib/services/identity/v2/identity_client.py
@@ -10,8 +10,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v2/roles_client.py b/tempest/lib/services/identity/v2/roles_client.py
index a133fc3..1580c33 100644
--- a/tempest/lib/services/identity/v2/roles_client.py
+++ b/tempest/lib/services/identity/v2/roles_client.py
@@ -10,8 +10,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v2/services_client.py b/tempest/lib/services/identity/v2/services_client.py
index fc51cb4..2a0e5ca 100644
--- a/tempest/lib/services/identity/v2/services_client.py
+++ b/tempest/lib/services/identity/v2/services_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v2/tenants_client.py b/tempest/lib/services/identity/v2/tenants_client.py
index 09618ad..3435835 100644
--- a/tempest/lib/services/identity/v2/tenants_client.py
+++ b/tempest/lib/services/identity/v2/tenants_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v2/token_client.py b/tempest/lib/services/identity/v2/token_client.py
index 9f10f58..1191154 100644
--- a/tempest/lib/services/identity/v2/token_client.py
+++ b/tempest/lib/services/identity/v2/token_client.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
@@ -125,15 +124,3 @@
             return body['token']['id'], body
         else:
             return body['token']['id']
-
-
-class TokenClientJSON(TokenClient):
-    LOG = logging.getLogger(__name__)
-
-    def _warn(self):
-        self.LOG.warning("%s class was deprecated and renamed to %s",
-                         self.__class__.__name__, 'TokenClient')
-
-    def __init__(self, *args, **kwargs):
-        self._warn()
-        super(TokenClientJSON, self).__init__(*args, **kwargs)
diff --git a/tempest/lib/services/identity/v2/users_client.py b/tempest/lib/services/identity/v2/users_client.py
index 72f29be..c3217c9 100644
--- a/tempest/lib/services/identity/v2/users_client.py
+++ b/tempest/lib/services/identity/v2/users_client.py
@@ -10,8 +10,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/__init__.py b/tempest/lib/services/identity/v3/__init__.py
index da1c51c..86fa991 100644
--- a/tempest/lib/services/identity/v3/__init__.py
+++ b/tempest/lib/services/identity/v3/__init__.py
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+from tempest.lib.services.identity.v3.access_rules_client import \
+    AccessRulesClient
 from tempest.lib.services.identity.v3.application_credentials_client import \
     ApplicationCredentialsClient
 from tempest.lib.services.identity.v3.catalog_client import \
@@ -48,9 +50,10 @@
 from tempest.lib.services.identity.v3.users_client import UsersClient
 from tempest.lib.services.identity.v3.versions_client import VersionsClient
 
-__all__ = ['ApplicationCredentialsClient', 'CatalogClient',
-           'CredentialsClient', 'DomainsClient', 'DomainConfigurationClient',
-           'EndPointGroupsClient', 'EndPointsClient', 'EndPointsFilterClient',
+__all__ = ['AccessRulesClient', 'ApplicationCredentialsClient',
+           'CatalogClient', 'CredentialsClient', 'DomainsClient',
+           'DomainConfigurationClient', 'EndPointGroupsClient',
+           'EndPointsClient', 'EndPointsFilterClient',
            'GroupsClient', 'IdentityClient', 'InheritedRolesClient',
            'OAUTHConsumerClient', 'OAUTHTokenClient', 'PoliciesClient',
            'ProjectsClient', 'ProjectTagsClient', 'RegionsClient',
diff --git a/tempest/lib/services/identity/v3/access_rules_client.py b/tempest/lib/services/identity/v3/access_rules_client.py
new file mode 100644
index 0000000..c3be5df
--- /dev/null
+++ b/tempest/lib/services/identity/v3/access_rules_client.py
@@ -0,0 +1,69 @@
+# Copyright 2019 SUSE LLC
+#
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+https://docs.openstack.org/api-ref/identity/v3/index.html#application-credentials
+"""
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class AccessRulesClient(rest_client.RestClient):
+    api_version = "v3"
+
+    def show_access_rule(self, user_id, access_rule_id):
+        """Gets details of an access rule.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3/index.html#show-access-rule-details
+        """
+        resp, body = self.get('users/%s/access_rules/%s' %
+                              (user_id, access_rule_id))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_access_rules(self, user_id, **params):
+        """Lists all access rules for a user.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3/index.html#list-access-rules
+        """
+        url = 'users/%s/access_rules' % user_id
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_access_rule(self, user_id, access_rule_id):
+        """Deletes an access rule.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3/index.html#delete-access-rule
+        """
+        resp, body = self.delete('users/%s/access_rules/%s' %
+                                 (user_id, access_rule_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
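
A usage sketch for the new client; the client instance and user id are placeholders, and the 'access_rules'/'id' response keys follow the Keystone API reference linked above:

def prune_access_rules(access_rules_client, user_id):
    # List a user's access rules and delete each one.
    rules = access_rules_client.list_access_rules(user_id)
    for rule in rules['access_rules']:
        access_rules_client.delete_access_rule(user_id, rule['id'])
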
diff --git a/tempest/lib/services/identity/v3/application_credentials_client.py b/tempest/lib/services/identity/v3/application_credentials_client.py
index be2e172..e7f3ac2 100644
--- a/tempest/lib/services/identity/v3/application_credentials_client.py
+++ b/tempest/lib/services/identity/v3/application_credentials_client.py
@@ -18,8 +18,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#application-credentials
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/credentials_client.py b/tempest/lib/services/identity/v3/credentials_client.py
index 3f4b40e..27f6156 100644
--- a/tempest/lib/services/identity/v3/credentials_client.py
+++ b/tempest/lib/services/identity/v3/credentials_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#credentials
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/domains_client.py b/tempest/lib/services/identity/v3/domains_client.py
index bd32cfc..c1d1980 100644
--- a/tempest/lib/services/identity/v3/domains_client.py
+++ b/tempest/lib/services/identity/v3/domains_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/endpoint_filter_client.py b/tempest/lib/services/identity/v3/endpoint_filter_client.py
index ce84869..2d5c8c9 100644
--- a/tempest/lib/services/identity/v3/endpoint_filter_client.py
+++ b/tempest/lib/services/identity/v3/endpoint_filter_client.py
@@ -66,3 +66,57 @@
             % (project_id, endpoint_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
+
+    def list_endpoint_groups_for_project(self, project_id):
+        """List Endpoint Groups Associated with Project."""
+        resp, body = self.get(
+            self.ep_filter + '/projects/%s/endpoint_groups'
+            % project_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_projects_for_endpoint_group(self, endpoint_group_id):
+        """List Projects Associated with Endpoint Group."""
+        resp, body = self.get(
+            self.ep_filter + '/endpoint_groups/%s/projects'
+            % endpoint_group_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_endpoints_for_endpoint_group(self, endpoint_group_id):
+        """List Endpoints Associated with Endpoint Group."""
+        resp, body = self.get(
+            self.ep_filter + '/endpoint_groups/%s/endpoints'
+            % endpoint_group_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
+        """Create Endpoint Group to Project Association."""
+        body = None
+        resp, body = self.put(
+            self.ep_filter + '/endpoint_groups/%s/projects/%s'
+            % (endpoint_group_id, project_id), body)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_endpoint_group_for_project(self, endpoint_group_id, project_id):
+        """Get Endpoint Group to Project Association."""
+        resp, body = self.get(
+            self.ep_filter + '/endpoint_groups/%s/projects/%s'
+            % (endpoint_group_id, project_id))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_endpoint_group_from_project(
+        self, endpoint_group_id, project_id):
+        """Delete Endpoint Group to Project Association."""
+        resp, body = self.delete(
+            self.ep_filter + '/endpoint_groups/%s/projects/%s'
+            % (endpoint_group_id, project_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
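
A usage sketch for the new endpoint-group association calls; the client instance (an EndPointsFilterClient) and the ids are placeholders:

def exercise_endpoint_group_association(ep_filter_client,
                                        endpoint_group_id, project_id):
    ep_filter_client.add_endpoint_group_to_project(
        endpoint_group_id, project_id)
    ep_filter_client.show_endpoint_group_for_project(
        endpoint_group_id, project_id)
    groups = ep_filter_client.list_endpoint_groups_for_project(project_id)
    ep_filter_client.delete_endpoint_group_from_project(
        endpoint_group_id, project_id)
    return groups
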
diff --git a/tempest/lib/services/identity/v3/endpoints_client.py b/tempest/lib/services/identity/v3/endpoints_client.py
index 236b34c..de85388 100644
--- a/tempest/lib/services/identity/v3/endpoints_client.py
+++ b/tempest/lib/services/identity/v3/endpoints_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/groups_client.py b/tempest/lib/services/identity/v3/groups_client.py
index f823b21..6f82067 100644
--- a/tempest/lib/services/identity/v3/groups_client.py
+++ b/tempest/lib/services/identity/v3/groups_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#groups
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
@@ -110,6 +111,6 @@
 
     def check_group_user_existence(self, group_id, user_id):
         """Check user in group."""
-        resp, body = self.head('groups/%s/users/%s' % (group_id, user_id))
+        resp, _ = self.head('groups/%s/users/%s' % (group_id, user_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/identity/v3/identity_providers_client.py b/tempest/lib/services/identity/v3/identity_providers_client.py
new file mode 100644
index 0000000..002bc8c
--- /dev/null
+++ b/tempest/lib/services/identity/v3/identity_providers_client.py
@@ -0,0 +1,93 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class IdentityProvidersClient(rest_client.RestClient):
+
+    def register_identity_provider(self, identity_provider_id, **kwargs):
+        """Register an identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#register-an-identity-provider
+        """
+        post_body = json.dumps({'identity_provider': kwargs})
+        resp, body = self.put(
+            'OS-FEDERATION/identity_providers/%s' % identity_provider_id,
+            post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_identity_providers(self, **params):
+        """List identity providers.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-identity-providers
+        """
+        url = 'identity_providers'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def get_identity_provider(self, identity_provider_id):
+        """Get identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-identity-provider
+        """
+        resp, body = self.get(
+            'OS-FEDERATION/identity_providers/%s' % identity_provider_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_identity_provider(self, identity_provider_id):
+        """Delete identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-identity-provider
+        """
+        resp, body = self.delete(
+            'OS-FEDERATION/identity_providers/%s' % identity_provider_id)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_identity_provider(self, identity_provider_id, **kwargs):
+        """Update identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-identity-provider
+        """
+        post_body = json.dumps({'identity_provider': kwargs})
+        resp, body = self.patch(
+            'OS-FEDERATION/identity_providers/%s' % identity_provider_id,
+            post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
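
A minimal usage sketch, not part of the patch: it assumes idp_client is an
already-authenticated IdentityProvidersClient instance, and the provider id
and remote id below are illustrative placeholders.

    # Register, inspect, disable and finally remove an identity provider.
    idp_client.register_identity_provider(
        'testshib', remote_ids=['https://idp.testshib.example.org/idp'],
        enabled=True)
    providers = idp_client.list_identity_providers(enabled=True)
    print([idp['id'] for idp in providers['identity_providers']])
    idp_client.update_identity_provider('testshib', enabled=False)
    idp_client.delete_identity_provider('testshib')
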
diff --git a/tempest/lib/services/identity/v3/inherited_roles_client.py b/tempest/lib/services/identity/v3/inherited_roles_client.py
index 3949437..f937ed6 100644
--- a/tempest/lib/services/identity/v3/inherited_roles_client.py
+++ b/tempest/lib/services/identity/v3/inherited_roles_client.py
@@ -51,7 +51,7 @@
     def check_user_inherited_project_role_on_domain(
             self, domain_id, user_id, role_id):
         """Checks whether a user has an inherited project role on a domain."""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/domains/%s/users/%s/roles/%s/inherited_to_projects"
             % (domain_id, user_id, role_id))
         self.expected_success(204, resp.status)
@@ -88,7 +88,7 @@
     def check_group_inherited_project_role_on_domain(
             self, domain_id, group_id, role_id):
         """Checks whether a group has an inherited project role on a domain."""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/domains/%s/groups/%s/roles/%s/inherited_to_projects"
             % (domain_id, group_id, role_id))
         self.expected_success(204, resp.status)
@@ -115,7 +115,7 @@
     def check_user_has_flag_on_inherited_to_project(
             self, project_id, user_id, role_id):
         """Check if user has an inherited project role on project"""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/projects/%s/users/%s/roles/%s/inherited_to_projects"
             % (project_id, user_id, role_id))
         self.expected_success(204, resp.status)
@@ -142,7 +142,7 @@
     def check_group_has_flag_on_inherited_to_project(
             self, project_id, group_id, role_id):
         """Check if group has an inherited project role on project"""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/projects/%s/groups/%s/roles/%s/inherited_to_projects"
             % (project_id, group_id, role_id))
         self.expected_success(204, resp.status)
diff --git a/tempest/lib/services/identity/v3/mappings_client.py b/tempest/lib/services/identity/v3/mappings_client.py
new file mode 100644
index 0000000..a924b33
--- /dev/null
+++ b/tempest/lib/services/identity/v3/mappings_client.py
@@ -0,0 +1,91 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class MappingsClient(rest_client.RestClient):
+
+    def create_mapping(self, mapping_id, **kwargs):
+        """Create a mapping.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#create-a-mapping
+        """
+        post_body = json.dumps({'mapping': kwargs})
+        resp, body = self.put(
+            'OS-FEDERATION/mappings/%s' % mapping_id, post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def get_mapping(self, mapping_id):
+        """Get a mapping.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-a-mapping
+        """
+        resp, body = self.get(
+            'OS-FEDERATION/mappings/%s' % mapping_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_mapping(self, mapping_id, **kwargs):
+        """Update a mapping.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-a-mapping
+        """
+        post_body = json.dumps({'mapping': kwargs})
+        resp, body = self.patch(
+            'OS-FEDERATION/mappings/%s' % mapping_id, post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_mappings(self, **kwargs):
+        """List mappings.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-mappings
+        """
+        url = 'OS-FEDERATION/mappings'
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_mapping(self, mapping_id):
+        """Delete a mapping.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-mapping
+        """
+        resp, body = self.delete(
+            'OS-FEDERATION/mappings/%s' % mapping_id)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
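
A short sketch, not part of the patch, assuming mappings_client is an
authenticated MappingsClient; the mapping id, group id and rules are
illustrative only.

    rules = [{
        'local': [{'user': {'name': '{0}'}},
                  {'group': {'id': 'GROUP_ID'}}],
        'remote': [{'type': 'REMOTE_USER'}],
    }]
    mappings_client.create_mapping('saml-mapping', rules=rules)
    mapping = mappings_client.get_mapping('saml-mapping')['mapping']
    print(mapping['rules'])
    mappings_client.update_mapping('saml-mapping', rules=rules)
    mappings_client.delete_mapping('saml-mapping')
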
diff --git a/tempest/lib/services/identity/v3/oauth_token_client.py b/tempest/lib/services/identity/v3/oauth_token_client.py
index 6ca401b..564d6d6 100644
--- a/tempest/lib/services/identity/v3/oauth_token_client.py
+++ b/tempest/lib/services/identity/v3/oauth_token_client.py
@@ -18,9 +18,7 @@
 import hmac
 import random
 import time
-
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 from oslo_serialization import jsonutils as json
 
@@ -33,9 +31,9 @@
     def _escape(self, s):
         """Escape a unicode string in an OAuth-compatible fashion."""
         safe = b'~'
-        s = s.encode('utf-8') if isinstance(s, six.text_type) else s
+        s = s.encode('utf-8') if isinstance(s, str) else s
         s = urlparse.quote(s, safe)
-        if isinstance(s, six.binary_type):
+        if isinstance(s, bytes):
             s = s.decode('utf-8')
         return s
 
@@ -47,8 +45,8 @@
                                         verifier=None,
                                         http_method='GET'):
         """Generate OAUTH params along with signature."""
-        timestamp = six.text_type(int(time.time()))
-        nonce = six.text_type(random.getrandbits(64)) + timestamp
+        timestamp = str(int(time.time()))
+        nonce = str(random.getrandbits(64)) + timestamp
         oauth_params = [
             ('oauth_nonce', nonce),
             ('oauth_timestamp', timestamp),
@@ -71,7 +69,7 @@
         normalized_params = '&'.join(parameter_parts)
 
         # normalize_uri
-        scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
+        scheme, netloc, path, params, _, _ = urlparse.urlparse(uri)
         scheme = scheme.lower()
         netloc = netloc.lower()
         path = path.replace('//', '/')
diff --git a/tempest/lib/services/identity/v3/policies_client.py b/tempest/lib/services/identity/v3/policies_client.py
index 31c0d18..41def38 100644
--- a/tempest/lib/services/identity/v3/policies_client.py
+++ b/tempest/lib/services/identity/v3/policies_client.py
@@ -185,3 +185,27 @@
         resp, body = self.delete(url)
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
+
+    def list_endpoints_for_policy(self, policy_id):
+        """List policy and service endpoint associations.
+
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/#list-policy-and-service-endpoint-associations
+        """
+        url = "policies/{0}/OS-ENDPOINT-POLICY/endpoints".format(policy_id)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_policy_for_endpoint(self, endpoint_id):
+        """Show the effective policy associated with an endpoint
+
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/#show-the-effective-policy-associated-with-an-endpoint
+        """
+        url = "endpoints/{0}/OS-ENDPOINT-POLICY/policy".format(endpoint_id)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
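
Both additions are read-only lookups; a brief sketch, assuming an
authenticated policies_client, existing policy_id/endpoint_id values
(placeholders here), and the usual 'endpoints' and 'policy' keys in the
OS-ENDPOINT-POLICY responses.

    associated = policies_client.list_endpoints_for_policy(policy_id)
    for endpoint in associated['endpoints']:
        print(endpoint['id'], endpoint['interface'])

    effective = policies_client.show_policy_for_endpoint(endpoint_id)
    print(effective['policy']['id'])
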
diff --git a/tempest/lib/services/identity/v3/projects_client.py b/tempest/lib/services/identity/v3/projects_client.py
index b186fba..fffbe7a 100644
--- a/tempest/lib/services/identity/v3/projects_client.py
+++ b/tempest/lib/services/identity/v3/projects_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/protocols_client.py b/tempest/lib/services/identity/v3/protocols_client.py
new file mode 100644
index 0000000..19aa426
--- /dev/null
+++ b/tempest/lib/services/identity/v3/protocols_client.py
@@ -0,0 +1,97 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class ProtocolsClient(rest_client.RestClient):
+
+    def add_protocol_to_identity_provider(self, idp_id, protocol_id,
+                                          **kwargs):
+        """Add protocol to identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#add-protocol-to-identity-provider
+        """
+        post_body = json.dumps({'protocol': kwargs})
+        resp, body = self.put(
+            'OS-FEDERATION/identity_providers/%s/protocols/%s'
+            % (idp_id, protocol_id), post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_protocols_of_identity_provider(self, idp_id, **kwargs):
+        """List protocols of identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-protocols-of-identity-provider
+        """
+        url = 'OS-FEDERATION/identity_providers/%s/protocols' % idp_id
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def get_protocol_for_identity_provider(self, idp_id, protocol_id):
+        """Get protocol for identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-protocol-for-identity-provider
+        """
+        resp, body = self.get(
+            'OS-FEDERATION/identity_providers/%s/protocols/%s'
+            % (idp_id, protocol_id))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_mapping_for_identity_provider(self, idp_id, protocol_id,
+                                             **kwargs):
+        """Update attribute mapping for identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-attribute-mapping-for-identity-provider
+        """
+        post_body = json.dumps({'protocol': kwargs})
+        resp, body = self.patch(
+            'OS-FEDERATION/identity_providers/%s/protocols/%s'
+            % (idp_id, protocol_id), post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_protocol_from_identity_provider(self, idp_id, protocol_id):
+        """Delete a protocol from identity provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-protocol-from-identity-provider
+        """
+        resp, body = self.delete(
+            'OS-FEDERATION/identity_providers/%s/protocols/%s'
+            % (idp_id, protocol_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
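
Illustrative only: the sketch assumes protocols_client is an authenticated
ProtocolsClient and that the 'testshib' identity provider and 'saml-mapping'
mapping from the earlier sketches already exist.

    protocols_client.add_protocol_to_identity_provider(
        'testshib', 'saml2', mapping_id='saml-mapping')
    listed = protocols_client.list_protocols_of_identity_provider('testshib')
    print([p['id'] for p in listed['protocols']])
    protocols_client.update_mapping_for_identity_provider(
        'testshib', 'saml2', mapping_id='saml-mapping')
    protocols_client.delete_protocol_from_identity_provider(
        'testshib', 'saml2')
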
diff --git a/tempest/lib/services/identity/v3/regions_client.py b/tempest/lib/services/identity/v3/regions_client.py
index a598c9c..3aed5b8 100644
--- a/tempest/lib/services/identity/v3/regions_client.py
+++ b/tempest/lib/services/identity/v3/regions_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#regions
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/role_assignments_client.py b/tempest/lib/services/identity/v3/role_assignments_client.py
index 51ee8f6..f615709 100644
--- a/tempest/lib/services/identity/v3/role_assignments_client.py
+++ b/tempest/lib/services/identity/v3/role_assignments_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/roles_client.py b/tempest/lib/services/identity/v3/roles_client.py
index f9356be..4836784 100644
--- a/tempest/lib/services/identity/v3/roles_client.py
+++ b/tempest/lib/services/identity/v3/roles_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
@@ -89,6 +90,13 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def create_user_role_on_system(self, user_id, role_id):
+        """Add roles to a user on the system."""
+        resp, body = self.put('system/users/%s/roles/%s' %
+                              (user_id, role_id), None)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def list_user_roles_on_project(self, project_id, user_id):
         """list roles of a user on a project."""
         resp, body = self.get('projects/%s/users/%s/roles' %
@@ -105,6 +113,13 @@
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
 
+    def list_user_roles_on_system(self, user_id):
+        """list roles of a user on the system."""
+        resp, body = self.get('system/users/%s/roles' % user_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
     def delete_role_from_user_on_project(self, project_id, user_id, role_id):
         """Delete role of a user on a project."""
         resp, body = self.delete('projects/%s/users/%s/roles/%s' %
@@ -119,19 +134,32 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def delete_role_from_user_on_system(self, user_id, role_id):
+        """Delete role of a user on the system."""
+        resp, body = self.delete('system/users/%s/roles/%s' %
+                                 (user_id, role_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def check_user_role_existence_on_project(self, project_id,
                                              user_id, role_id):
         """Check role of a user on a project."""
-        resp, body = self.head('projects/%s/users/%s/roles/%s' %
-                               (project_id, user_id, role_id))
+        resp, _ = self.head('projects/%s/users/%s/roles/%s' %
+                            (project_id, user_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def check_user_role_existence_on_domain(self, domain_id,
                                             user_id, role_id):
         """Check role of a user on a domain."""
-        resp, body = self.head('domains/%s/users/%s/roles/%s' %
-                               (domain_id, user_id, role_id))
+        resp, _ = self.head('domains/%s/users/%s/roles/%s' %
+                            (domain_id, user_id, role_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp)
+
+    def check_user_role_existence_on_system(self, user_id, role_id):
+        """Check role of a user on the system."""
+        resp, _ = self.head('system/users/%s/roles/%s' % (user_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
@@ -149,6 +177,13 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def create_group_role_on_system(self, group_id, role_id):
+        """Add roles to a group on the system."""
+        resp, body = self.put('system/groups/%s/roles/%s' %
+                              (group_id, role_id), None)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def list_group_roles_on_project(self, project_id, group_id):
         """list roles of a group on a project."""
         resp, body = self.get('projects/%s/groups/%s/roles' %
@@ -165,6 +200,13 @@
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
 
+    def list_group_roles_on_system(self, group_id):
+        """list roles of a group on the system."""
+        resp, body = self.get('system/groups/%s/roles' % group_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
     def delete_role_from_group_on_project(self, project_id, group_id, role_id):
         """Delete role of a group on a project."""
         resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
@@ -179,19 +221,33 @@
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def delete_role_from_group_on_system(self, group_id, role_id):
+        """Delete role of a group on the system."""
+        resp, body = self.delete('system/groups/%s/roles/%s' %
+                                 (group_id, role_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
     def check_role_from_group_on_project_existence(self, project_id,
                                                    group_id, role_id):
         """Check role of a group on a project."""
-        resp, body = self.head('projects/%s/groups/%s/roles/%s' %
-                               (project_id, group_id, role_id))
+        resp, _ = self.head('projects/%s/groups/%s/roles/%s' %
+                            (project_id, group_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def check_role_from_group_on_domain_existence(self, domain_id,
                                                   group_id, role_id):
         """Check role of a group on a domain."""
-        resp, body = self.head('domains/%s/groups/%s/roles/%s' %
-                               (domain_id, group_id, role_id))
+        resp, _ = self.head('domains/%s/groups/%s/roles/%s' %
+                            (domain_id, group_id, role_id))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp)
+
+    def check_role_from_group_on_system_existence(self, group_id, role_id):
+        """Check role of a group on the system."""
+        resp, _ = self.head('system/groups/%s/roles/%s' %
+                            (group_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
@@ -232,14 +288,14 @@
 
     def check_role_inference_rule(self, prior_role, implies_role):
         """Check a role inference rule."""
-        resp, body = self.head('roles/%s/implies/%s' %
-                               (prior_role, implies_role))
+        resp, _ = self.head('roles/%s/implies/%s' %
+                            (prior_role, implies_role))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def delete_role_inference_rule(self, prior_role, implies_role):
         """Delete a role inference rule."""
-        resp, body = self.delete('roles/%s/implies/%s' %
-                                 (prior_role, implies_role))
+        resp, _ = self.delete('roles/%s/implies/%s' %
+                              (prior_role, implies_role))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
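
The new system-scope calls mirror the existing project and domain variants;
a sketch, assuming roles_client is an authenticated RolesClient and user_id,
group_id and role_id refer to existing resources.

    roles_client.create_user_role_on_system(user_id, role_id)
    roles_client.check_user_role_existence_on_system(user_id, role_id)
    print(roles_client.list_user_roles_on_system(user_id)['roles'])
    roles_client.delete_role_from_user_on_system(user_id, role_id)

    roles_client.create_group_role_on_system(group_id, role_id)
    print(roles_client.list_group_roles_on_system(group_id)['roles'])
    roles_client.delete_role_from_group_on_system(group_id, role_id)
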
diff --git a/tempest/lib/services/identity/v3/service_providers_client.py b/tempest/lib/services/identity/v3/service_providers_client.py
new file mode 100644
index 0000000..5d4f014
--- /dev/null
+++ b/tempest/lib/services/identity/v3/service_providers_client.py
@@ -0,0 +1,93 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class ServiceProvidersClient(rest_client.RestClient):
+
+    def register_service_provider(self, service_provider_id, **kwargs):
+        """Register a service provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#register-a-service-provider
+        """
+        post_body = json.dumps({'service_provider': kwargs})
+        resp, body = self.put(
+            'OS-FEDERATION/service_providers/%s' % service_provider_id,
+            post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_service_providers(self, **kwargs):
+        """List service providers.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-service-providers
+        """
+        url = 'OS-FEDERATION/service_providers'
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def get_service_provider(self, service_provider_id):
+        """Get a service provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-service-provider
+        """
+        resp, body = self.get(
+            'OS-FEDERATION/service_providers/%s' % service_provider_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_service_provider(self, service_provider_id):
+        """Delete a service provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-service-provider
+        """
+        resp, body = self.delete(
+            'OS-FEDERATION/service_providers/%s' % service_provider_id)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_service_provider(self, service_provider_id, **kwargs):
+        """Update a service provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-service-provider
+        """
+        post_body = json.dumps({'service_provider': kwargs})
+        resp, body = self.patch(
+            'OS-FEDERATION/service_providers/%s' % service_provider_id,
+            post_body)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
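
Not part of the patch; a minimal sketch assuming sp_client is an
authenticated ServiceProvidersClient and the URLs point at a hypothetical
remote deployment.

    sp_client.register_service_provider(
        'remote-cloud',
        auth_url='https://remote.example.org/identity/v3/OS-FEDERATION/'
                 'identity_providers/local/protocols/saml2/auth',
        sp_url='https://remote.example.org/Shibboleth.sso/SAML2/ECP',
        enabled=True)
    print(sp_client.list_service_providers()['service_providers'])
    sp_client.update_service_provider('remote-cloud', enabled=False)
    sp_client.delete_service_provider('remote-cloud')
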
diff --git a/tempest/lib/services/identity/v3/services_client.py b/tempest/lib/services/identity/v3/services_client.py
index eb961a5..994df2f 100644
--- a/tempest/lib/services/identity/v3/services_client.py
+++ b/tempest/lib/services/identity/v3/services_client.py
@@ -17,8 +17,9 @@
 https://docs.openstack.org/api-ref/identity/v3/index.html#service-catalog-and-endpoints
 """
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/token_client.py b/tempest/lib/services/identity/v3/token_client.py
index 6956297..c63966a 100644
--- a/tempest/lib/services/identity/v3/token_client.py
+++ b/tempest/lib/services/identity/v3/token_client.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
@@ -51,7 +50,7 @@
     def auth(self, user_id=None, username=None, password=None, project_id=None,
              project_name=None, user_domain_id=None, user_domain_name=None,
              project_domain_id=None, project_domain_name=None, domain_id=None,
-             domain_name=None, token=None, app_cred_id=None,
+             domain_name=None, system=None, token=None, app_cred_id=None,
              app_cred_secret=None):
         """Obtains a token from the authentication service
 
@@ -65,6 +64,7 @@
         :param domain_name: a domain name to scope to
         :param project_id: a project id to scope to
         :param project_name: a project name to scope to
+        :param system: whether the token should be scoped to the system
         :param token: a token to re-scope.
 
         Accepts different combinations of credentials.
@@ -74,6 +74,7 @@
         - user_id, password
         - username, password, user_domain_id
         - username, password, project_name, user_domain_id, project_domain_id
+        - username, password, user_domain_id, system
         Validation is left to the server side.
         """
         creds = {
@@ -135,6 +136,8 @@
             creds['auth']['scope'] = dict(domain={'id': domain_id})
         elif domain_name:
             creds['auth']['scope'] = dict(domain={'name': domain_name})
+        elif system:
+            creds['auth']['scope'] = dict(system={system: True})
 
         body = json.dumps(creds, sort_keys=True)
         resp, body = self.post(self.auth_url, body=body)
@@ -191,15 +194,3 @@
             return token, body['token']
         else:
             return token
-
-
-class V3TokenClientJSON(V3TokenClient):
-    LOG = logging.getLogger(__name__)
-
-    def _warn(self):
-        self.LOG.warning("%s class was deprecated and renamed to %s",
-                         self.__class__.__name__, 'V3TokenClient')
-
-    def __init__(self, *args, **kwargs):
-        self._warn()
-        super(V3TokenClientJSON, self).__init__(*args, **kwargs)
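
With the new parameter, a system-scoped token request reduces to the sketch
below; it assumes token_client is a configured V3TokenClient and that the
credentials shown (placeholders) are valid.

    # Passing system='all' produces {"system": {"all": true}} as the scope,
    # which is what keystone expects for a deployment-wide token.
    token_client.auth(username='admin', password='secret',
                      user_domain_id='default', system='all')
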
diff --git a/tempest/lib/services/identity/v3/trusts_client.py b/tempest/lib/services/identity/v3/trusts_client.py
index f1cc806..48a7956 100644
--- a/tempest/lib/services/identity/v3/trusts_client.py
+++ b/tempest/lib/services/identity/v3/trusts_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/identity/v3/users_client.py b/tempest/lib/services/identity/v3/users_client.py
index f47730f..771ffea 100644
--- a/tempest/lib/services/identity/v3/users_client.py
+++ b/tempest/lib/services/identity/v3/users_client.py
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
@@ -118,3 +119,30 @@
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
+
+    def create_user_ec2_credential(self, user_id, **kwargs):
+        post_body = json.dumps(kwargs)
+        resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
+                               post_body)
+        self.expected_success(201, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_user_ec2_credential(self, user_id, access):
+        resp, body = self.delete('/users/%s/credentials/OS-EC2/%s' %
+                                 (user_id, access))
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_user_ec2_credentials(self, user_id):
+        resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_user_ec2_credential(self, user_id, access):
+        resp, body = self.get('/users/%s/credentials/OS-EC2/%s' %
+                              (user_id, access))
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
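
The EC2 credential helpers chain together as below; a sketch only, assuming
users_client is an authenticated UsersClient, user_id/project_id already
exist, and the responses carry the usual 'credential'/'credentials' keys.

    cred = users_client.create_user_ec2_credential(
        user_id, tenant_id=project_id)['credential']
    users_client.show_user_ec2_credential(user_id, cred['access'])
    print(users_client.list_user_ec2_credentials(user_id)['credentials'])
    users_client.delete_user_ec2_credential(user_id, cred['access'])
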
diff --git a/tempest/lib/services/image/v1/__init__.py b/tempest/lib/services/image/v1/__init__.py
index 9bd8262..1f33cef 100644
--- a/tempest/lib/services/image/v1/__init__.py
+++ b/tempest/lib/services/image/v1/__init__.py
@@ -12,8 +12,17 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+import warnings
+
 from tempest.lib.services.image.v1.image_members_client import \
     ImageMembersClient
 from tempest.lib.services.image.v1.images_client import ImagesClient
 
 __all__ = ['ImageMembersClient', 'ImagesClient']
+
+
+warnings.warn(
+    "The tempest.lib.services.image.v1 module (Image v1 APIs service "
+    "clients) is deprecated in favor of tempest.lib.services.image.v2 "
+    "(Image v2 APIs service clients) and will be removed once Tempest stop "
+    "supporting stable Ussuri.", DeprecationWarning)
diff --git a/tempest/lib/services/image/v1/images_client.py b/tempest/lib/services/image/v1/images_client.py
index 0e76a63..c9a4a94 100644
--- a/tempest/lib/services/image/v1/images_client.py
+++ b/tempest/lib/services/image/v1/images_client.py
@@ -14,9 +14,9 @@
 #    under the License.
 
 import functools
+from urllib import parse as urllib
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index 4713cce..abf427c 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -14,9 +14,9 @@
 #    under the License.
 
 import functools
+from urllib import parse as urllib
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
@@ -121,6 +121,14 @@
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
 
+    def show_image_tasks(self, image_id):
+        """Show image tasks."""
+        url = 'images/%s/tasks' % image_id
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
     def is_resource_deleted(self, id):
         try:
             self.show_image(id)
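
A quick sketch of the new call, assuming images_client is an authenticated
v2 ImagesClient, image_id refers to an existing image, and the deployment's
Glance exposes the image tasks API with a 'tasks' list in the response.

    tasks = images_client.show_image_tasks(image_id)
    for task in tasks['tasks']:
        print(task['id'], task['status'])
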
diff --git a/tempest/lib/services/image/v2/namespace_objects_client.py b/tempest/lib/services/image/v2/namespace_objects_client.py
index 0cae816..32f5a2c 100644
--- a/tempest/lib/services/image/v2/namespace_objects_client.py
+++ b/tempest/lib/services/image/v2/namespace_objects_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/image/v2/namespace_tags_client.py b/tempest/lib/services/image/v2/namespace_tags_client.py
index 4315f16..5bca229 100644
--- a/tempest/lib/services/image/v2/namespace_tags_client.py
+++ b/tempest/lib/services/image/v2/namespace_tags_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/image/v2/versions_client.py b/tempest/lib/services/image/v2/versions_client.py
index 1b7f806..98b4fb6 100644
--- a/tempest/lib/services/image/v2/versions_client.py
+++ b/tempest/lib/services/image/v2/versions_client.py
@@ -30,3 +30,13 @@
         self.expected_success(300, resp.status)
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
+
+    def has_version(self, version):
+        """Return True if a version is supported."""
+        version = 'v%s' % version
+        supported = ['SUPPORTED', 'CURRENT']
+        versions = self.list_versions()
+        for version_struct in versions['versions']:
+            if version_struct['id'] == version:
+                return version_struct['status'] in supported
+        return False
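
has_version() gives callers a simple capability check; for example (assuming
versions_client is an authenticated image VersionsClient):

    supported = versions_client.has_version('2.6')
    print('image API v2.6 advertised as CURRENT/SUPPORTED:', supported)
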
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index f7ac046..7e57499 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -36,6 +36,7 @@
 from tempest.lib.services.network.subnetpools_client import SubnetpoolsClient
 from tempest.lib.services.network.subnets_client import SubnetsClient
 from tempest.lib.services.network.tags_client import TagsClient
+from tempest.lib.services.network.trunks_client import TrunksClient
 from tempest.lib.services.network.versions_client import NetworkVersionsClient
 
 __all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
@@ -44,4 +45,4 @@
            'QosClient', 'QosMinimumBandwidthRulesClient', 'QuotasClient',
            'RoutersClient', 'SecurityGroupRulesClient', 'SecurityGroupsClient',
            'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
-           'SubnetsClient', 'TagsClient']
+           'SubnetsClient', 'TagsClient', 'TrunksClient']
diff --git a/tempest/lib/services/network/base.py b/tempest/lib/services/network/base.py
index fe8b244..ee87dd4 100644
--- a/tempest/lib/services/network/base.py
+++ b/tempest/lib/services/network/base.py
@@ -10,8 +10,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/network/trunks_client.py b/tempest/lib/services/network/trunks_client.py
new file mode 100644
index 0000000..2fd9e01
--- /dev/null
+++ b/tempest/lib/services/network/trunks_client.py
@@ -0,0 +1,100 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.network import base
+
+
+class TrunksClient(base.BaseNetworkClient):
+
+    def create_trunk(self, **kwargs):
+        """Creates a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#create-trunk
+        """
+        uri = '/trunks'
+        post_data = {'trunk': kwargs}
+        return self.create_resource(uri, post_data)
+
+    def update_trunk(self, trunk_id, **kwargs):
+        """Updates a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#update-trunk
+        """
+        uri = '/trunks/%s' % trunk_id
+        put_data = {'trunk': kwargs}
+        return self.update_resource(uri, put_data)
+
+    def show_trunk(self, trunk_id):
+        """Shows details for a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#show-trunk
+        """
+        uri = '/trunks/%s' % trunk_id
+        return self.show_resource(uri)
+
+    def delete_trunk(self, trunk_id):
+        """Deletes a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-trunk
+        """
+        uri = '/trunks/%s' % trunk_id
+        return self.delete_resource(uri)
+
+    def list_trunks(self, **filters):
+        """Lists trunks to which the tenant has access.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-trunks
+        """
+        uri = '/trunks'
+        return self.list_resources(uri, **filters)
+
+    def add_subports_to_trunk(self, trunk_id, sub_ports):
+        """Add subports to a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#add-subports-to-trunk
+        """
+        uri = '/trunks/%s/add_subports' % trunk_id
+        put_data = {'sub_ports': sub_ports}
+        return self.update_resource(uri, put_data)
+
+    def delete_subports_from_trunk(self, trunk_id, sub_ports):
+        """Deletes subports from a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#delete-subports-from-trunk
+        """
+        uri = '/trunks/%s/remove_subports' % trunk_id
+        put_data = {'sub_ports': sub_ports}
+        return self.update_resource(uri, put_data)
+
+    def list_subports_of_trunk(self, trunk_id):
+        """List subports of a trunk.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/network/v2/index.html#list-subports-for-trunk
+        """
+        uri = '/trunks/%s/get_subports' % trunk_id
+        return self.list_resources(uri)
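
A sketch of how the new TrunksClient could be exercised, not part of the
patch; it assumes trunks_client is an authenticated TrunksClient and
parent_port_id/child_port_id are existing Neutron port UUIDs.

    trunk = trunks_client.create_trunk(port_id=parent_port_id,
                                       name='demo-trunk')['trunk']

    # Attach a subport, list what is attached, then clean up. Subports must
    # be removed before the trunk itself can be deleted.
    trunks_client.add_subports_to_trunk(
        trunk['id'],
        [{'port_id': child_port_id,
          'segmentation_type': 'vlan',
          'segmentation_id': 100}])
    print(trunks_client.list_subports_of_trunk(trunk['id']))
    trunks_client.delete_subports_from_trunk(
        trunk['id'], [{'port_id': child_port_id}])
    trunks_client.delete_trunk(trunk['id'])
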
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 8c15a88..52b2534 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -13,10 +13,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
 from xml.etree import ElementTree as etree
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index 027fb1f..6d07ec1 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -13,17 +13,25 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
 from xml.etree import ElementTree as etree
 
 import debtcollector.moves
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
+from tempest.lib import exceptions
 
 
 class ContainerClient(rest_client.RestClient):
 
+    def is_resource_deleted(self, container):
+        try:
+            self.list_container_metadata(container)
+        except exceptions.NotFound:
+            return True
+        return False
+
     def update_container(self, container_name, **headers):
         """Creates or Updates a container
 
diff --git a/tempest/lib/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
index 383aff6..bb82975 100644
--- a/tempest/lib/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -12,9 +12,10 @@
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+import ssl
 
-from six.moves import http_client as httplib
-from six.moves.urllib import parse as urlparse
+from http import client as httplib
+from urllib import parse as urlparse
 
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions
@@ -22,6 +23,13 @@
 
 class ObjectClient(rest_client.RestClient):
 
+    def is_resource_deleted(self, object_name, container):
+        try:
+            self.get_object(container, object_name)
+        except exceptions.NotFound:
+            return True
+        return False
+
     def create_object(self, container, object_name, data,
                       params=None, metadata=None, headers=None,
                       chunked=False):
@@ -118,7 +126,7 @@
         path = str(parsed.path) + "/"
         path += "%s/%s" % (str(container), str(object_name))
 
-        conn = _create_connection(parsed)
+        conn = self._create_connection(parsed)
         # Send the PUT request and the headers including the "Expect" header
         conn.putrequest('PUT', path)
 
@@ -151,15 +159,20 @@
 
         return resp.status, resp.reason
 
+    def _create_connection(self, parsed_url):
+        """Helper function to create connection with httplib
 
-def _create_connection(parsed_url):
-    """Helper function to create connection with httplib
+        :param parsed_url: parsed url of the remote location
+        """
+        context = None
+        # If CONF.identity.disable_ssl_certificate_validation is true,
+        # do not verify the server's SSL certificate.
+        if self.dscv:
+            context = ssl._create_unverified_context()
+        if parsed_url.scheme == 'https':
+            conn = httplib.HTTPSConnection(parsed_url.netloc,
+                                           context=context)
+        else:
+            conn = httplib.HTTPConnection(parsed_url.netloc)
 
-    :param parsed_url: parsed url of the remote location
-    """
-    if parsed_url.scheme == 'https':
-        conn = httplib.HTTPSConnection(parsed_url.netloc)
-    else:
-        conn = httplib.HTTPConnection(parsed_url.netloc)
-
-    return conn
+        return conn
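
Both the object and container clients now expose is_resource_deleted(), so
cleanup code can poll until Swift has actually removed the resource; a rough
sketch, assuming object_client is an authenticated ObjectClient and
'demo-container'/'demo-object' are placeholder names for an existing object.

    import time

    object_client.delete_object('demo-container', 'demo-object')
    while not object_client.is_resource_deleted('demo-object',
                                                'demo-container'):
        time.sleep(1)
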
diff --git a/tempest/lib/services/placement/__init__.py b/tempest/lib/services/placement/__init__.py
index 5c20c57..daeaeab 100644
--- a/tempest/lib/services/placement/__init__.py
+++ b/tempest/lib/services/placement/__init__.py
@@ -14,5 +14,7 @@
 
 from tempest.lib.services.placement.placement_client import \
     PlacementClient
+from tempest.lib.services.placement.resource_providers_client import \
+    ResourceProvidersClient
 
-__all__ = ['PlacementClient']
+__all__ = ['PlacementClient', 'ResourceProvidersClient']
diff --git a/tempest/lib/services/placement/placement_client.py b/tempest/lib/services/placement/placement_client.py
index b8e91b8..216ac08 100644
--- a/tempest/lib/services/placement/placement_client.py
+++ b/tempest/lib/services/placement/placement_client.py
@@ -12,8 +12,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.common import rest_client
 from tempest.lib.services.placement import base_placement_client
diff --git a/tempest/lib/services/placement/resource_providers_client.py b/tempest/lib/services/placement/resource_providers_client.py
new file mode 100644
index 0000000..3214053
--- /dev/null
+++ b/tempest/lib/services/placement/resource_providers_client.py
@@ -0,0 +1,123 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from urllib import parse as urllib
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.placement import base_placement_client
+
+
+class ResourceProvidersClient(base_placement_client.BasePlacementClient):
+    """Client class for resource provider related methods
+
+    This client class supports read operations for resource providers and
+    write access to their inventories. The following resources are covered:
+    * resource providers
+    * resource provider inventories
+    * resource provider aggregates
+    * resource provider usages
+    """
+
+    def list_resource_providers(self, **params):
+        """List resource providers.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-providers
+        """
+        url = '/resource_providers'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_resource_provider(self, rp_uuid):
+        """Show resource provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#show-resource-provider
+        """
+        url = '/resource_providers/%s' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_inventories(self, rp_uuid):
+        """List resource provider inventories.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-provider-inventories
+        """
+        url = '/resource_providers/%s/inventories' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_usages(self, rp_uuid):
+        """List resource provider usages.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-provider-usages
+        """
+        url = '/resource_providers/%s/usages' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_aggregates(self, rp_uuid):
+        """List resource provider aggregates.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-provider-aggregates
+        """
+        url = '/resource_providers/%s/aggregates' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_resource_providers_inventories(self, rp_uuid, **kwargs):
+        """Update resource providers inventories.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#update-resource-provider-inventories
+        """
+        url = '/resource_providers/{}/inventories'.format(rp_uuid)
+        data = json.dumps(kwargs)
+        resp, body = self.put(url, data)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_resource_providers_inventories(self, rp_uuid):
+        """Delete resource providers inventories.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#delete-resource-provider-inventories
+        """
+        url = '/resource_providers/{}/inventories'.format(rp_uuid)
+        resp, body = self.delete(url)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp, body)
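
A read-mostly sketch of the new placement client, not part of the patch; it
assumes rp_client is an authenticated ResourceProvidersClient, the 'name'
filter value is illustrative, and the deployment has at least one matching
resource provider.

    rps = rp_client.list_resource_providers(name='compute-0')
    provider = rps['resource_providers'][0]
    print(rp_client.list_resource_provider_inventories(provider['uuid']))
    print(rp_client.list_resource_provider_usages(provider['uuid']))
    print(rp_client.list_resource_provider_aggregates(provider['uuid']))

    # Replacing the provider's inventory requires the current generation to
    # guard against concurrent updates.
    rp_client.update_resource_providers_inventories(
        provider['uuid'],
        resource_provider_generation=provider['generation'],
        inventories={'VCPU': {'total': 8}})
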
diff --git a/tempest/lib/services/volume/__init__.py b/tempest/lib/services/volume/__init__.py
index 6855d8e..4b47251 100644
--- a/tempest/lib/services/volume/__init__.py
+++ b/tempest/lib/services/volume/__init__.py
@@ -12,8 +12,7 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
-from tempest.lib.services.volume import v1
 from tempest.lib.services.volume import v2
 from tempest.lib.services.volume import v3
 
-__all__ = ['v1', 'v2', 'v3']
+__all__ = ['v2', 'v3']
diff --git a/tempest/lib/services/volume/v1/__init__.py b/tempest/lib/services/volume/v1/__init__.py
deleted file mode 100644
index 7b5991f..0000000
--- a/tempest/lib/services/volume/v1/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.lib.services.volume.v1.availability_zone_client \
-    import AvailabilityZoneClient
-from tempest.lib.services.volume.v1.backups_client import BackupsClient
-from tempest.lib.services.volume.v1.encryption_types_client import \
-    EncryptionTypesClient
-from tempest.lib.services.volume.v1.extensions_client import ExtensionsClient
-from tempest.lib.services.volume.v1.hosts_client import HostsClient
-from tempest.lib.services.volume.v1.limits_client import LimitsClient
-from tempest.lib.services.volume.v1.qos_client import QosSpecsClient
-from tempest.lib.services.volume.v1.quotas_client import QuotasClient
-from tempest.lib.services.volume.v1.services_client import ServicesClient
-from tempest.lib.services.volume.v1.snapshots_client import SnapshotsClient
-from tempest.lib.services.volume.v1.types_client import TypesClient
-from tempest.lib.services.volume.v1.volumes_client import VolumesClient
-
-__all__ = ['AvailabilityZoneClient', 'BackupsClient', 'EncryptionTypesClient',
-           'ExtensionsClient', 'HostsClient', 'QosSpecsClient', 'QuotasClient',
-           'ServicesClient', 'SnapshotsClient', 'TypesClient', 'VolumesClient',
-           'LimitsClient']
diff --git a/tempest/lib/services/volume/v1/availability_zone_client.py b/tempest/lib/services/volume/v1/availability_zone_client.py
deleted file mode 100644
index be4f539..0000000
--- a/tempest/lib/services/volume/v1/availability_zone_client.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2014 NEC Corporation.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-
-
-class AvailabilityZoneClient(rest_client.RestClient):
-    """Volume V1 availability zone client."""
-
-    def list_availability_zones(self):
-        resp, body = self.get('os-availability-zone')
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/backups_client.py b/tempest/lib/services/volume/v1/backups_client.py
deleted file mode 100644
index 2289253..0000000
--- a/tempest/lib/services/volume/v1/backups_client.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class BackupsClient(rest_client.RestClient):
-    """Volume V1 Backups client"""
-    api_version = "v1"
-
-    def create_backup(self, **kwargs):
-        """Creates a backup of volume.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-backup
-        """
-        post_body = json.dumps({'backup': kwargs})
-        resp, body = self.post('backups', post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def restore_backup(self, backup_id, **kwargs):
-        """Restore volume from backup.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#restore-backup
-        """
-        post_body = json.dumps({'restore': kwargs})
-        resp, body = self.post('backups/%s/restore' % (backup_id), post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_backup(self, backup_id):
-        """Delete a backup of volume."""
-        resp, body = self.delete('backups/%s' % backup_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_backup(self, backup_id):
-        """Returns the details of a single backup."""
-        url = "backups/%s" % backup_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_backups(self, detail=False):
-        """Information for all the tenant's backups."""
-        url = "backups"
-        if detail:
-            url += "/detail"
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def export_backup(self, backup_id):
-        """Export backup metadata record."""
-        url = "backups/%s/export_record" % backup_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def import_backup(self, **kwargs):
-        """Import backup metadata record."""
-        # TODO(linanbj): Current api-site doesn't contain this API description.
-        # After fixing the api-site, we need to fix here also for putting the
-        # link to api-site.
-        post_body = json.dumps({'backup-record': kwargs})
-        resp, body = self.post("backups/import_record", post_body)
-        body = json.loads(body)
-        self.expected_success(201, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def reset_backup_status(self, backup_id, status):
-        """Reset the specified backup's status."""
-        post_body = json.dumps({'os-reset_status': {"status": status}})
-        resp, body = self.post('backups/%s/action' % backup_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_backup(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'backup'
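
The `is_resource_deleted`/`resource_type` pair being deleted here is the hook Tempest's cleanup code polls while waiting for a backup to disappear, and the v3 BackupsClient keeps the same hooks. A simplified sketch of that polling loop (not the real Tempest waiter) follows:

```python
import time


def wait_for_backup_deletion(client, backup_id, timeout=60, interval=2):
    """Sketch of the polling loop that ``is_resource_deleted`` enables.

    ``client`` is assumed to be any Tempest volume client exposing the
    ``is_resource_deleted``/``resource_type`` hooks, e.g. the v3
    BackupsClient, which mirrors the v1 client removed above.
    """
    start = time.time()
    while not client.is_resource_deleted(backup_id):
        if time.time() - start > timeout:
            raise RuntimeError(
                '%s %s was not deleted within %ss'
                % (client.resource_type, backup_id, timeout))
        time.sleep(interval)
```
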
diff --git a/tempest/lib/services/volume/v1/encryption_types_client.py b/tempest/lib/services/volume/v1/encryption_types_client.py
deleted file mode 100644
index 8e75ff9..0000000
--- a/tempest/lib/services/volume/v1/encryption_types_client.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class EncryptionTypesClient(rest_client.RestClient):
-
-    def is_resource_deleted(self, id):
-        try:
-            body = self.show_encryption_type(id)
-            if not body:
-                return True
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'encryption-type'
-
-    def show_encryption_type(self, volume_type_id):
-        """Get the volume encryption type for the specified volume type.
-
-        :param volume_type_id: Id of volume type.
-        """
-        url = "/types/%s/encryption" % volume_type_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_encryption_type(self, volume_type_id, **kwargs):
-        """Create encryption type.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-an-encryption-type-for-v2
-        """
-        url = "/types/%s/encryption" % volume_type_id
-        post_body = json.dumps({'encryption': kwargs})
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_encryption_type(self, volume_type_id):
-        """Delete the encryption type for the specified volume type."""
-        resp, body = self.delete(
-            "/types/%s/encryption/provider" % volume_type_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/extensions_client.py b/tempest/lib/services/volume/v1/extensions_client.py
deleted file mode 100644
index 7b849a8..0000000
--- a/tempest/lib/services/volume/v1/extensions_client.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-
-
-class ExtensionsClient(rest_client.RestClient):
-    """Volume V1 extensions client."""
-
-    def list_extensions(self):
-        url = 'extensions'
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/hosts_client.py b/tempest/lib/services/volume/v1/hosts_client.py
deleted file mode 100644
index f344678..0000000
--- a/tempest/lib/services/volume/v1/hosts_client.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-
-
-class HostsClient(rest_client.RestClient):
-    """Client class to send CRUD Volume Host API V1 requests"""
-
-    def list_hosts(self, **params):
-        """Lists all hosts.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-all-hosts
-        """
-        url = 'os-hosts'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/limits_client.py b/tempest/lib/services/volume/v1/limits_client.py
deleted file mode 100644
index e14b2dc..0000000
--- a/tempest/lib/services/volume/v1/limits_client.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2016 Red Hat, Inc.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-
-
-class LimitsClient(rest_client.RestClient):
-    """Volume V1 limits client."""
-
-    api_version = "v1"
-
-    def show_limits(self):
-        """Returns the details of a volume absolute limits."""
-        url = "limits"
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/qos_client.py b/tempest/lib/services/volume/v1/qos_client.py
deleted file mode 100644
index 67f2ead..0000000
--- a/tempest/lib/services/volume/v1/qos_client.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class QosSpecsClient(rest_client.RestClient):
-    """Volume V1 QoS client.
-
-       Client class to send CRUD QoS API requests
-    """
-
-    api_version = "v1"
-
-    def is_resource_deleted(self, qos_id):
-        try:
-            self.show_qos(qos_id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'qos'
-
-    def create_qos(self, **kwargs):
-        """Create a QoS Specification.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-qos-specification
-        """
-        post_body = json.dumps({'qos_specs': kwargs})
-        resp, body = self.post('qos-specs', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_qos(self, qos_id, force=False):
-        """Delete the specified QoS specification."""
-        resp, body = self.delete(
-            "qos-specs/%s?force=%s" % (qos_id, force))
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_qos(self):
-        """List all the QoS specifications created."""
-        url = 'qos-specs'
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_qos(self, qos_id):
-        """Get the specified QoS specification."""
-        url = "qos-specs/%s" % qos_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def set_qos_key(self, qos_id, **kwargs):
-        """Set the specified keys/values of QoS specification.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#set-keys-in-qos-specification
-        """
-        put_body = json.dumps({"qos_specs": kwargs})
-        resp, body = self.put('qos-specs/%s' % qos_id, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def unset_qos_key(self, qos_id, keys):
-        """Unset the specified keys of QoS specification.
-
-        :param keys: keys to delete from the QoS specification.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#unset-keys-in-qos-specification
-        """
-        put_body = json.dumps({'keys': keys})
-        resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def associate_qos(self, qos_id, vol_type_id):
-        """Associate the specified QoS with specified volume-type."""
-        url = "qos-specs/%s/associate" % qos_id
-        url += "?vol_type_id=%s" % vol_type_id
-        resp, body = self.get(url)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_association_qos(self, qos_id):
-        """Get the association of the specified QoS specification."""
-        url = "qos-specs/%s/associations" % qos_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def disassociate_qos(self, qos_id, vol_type_id):
-        """Disassociate the specified QoS with specified volume-type."""
-        url = "qos-specs/%s/disassociate" % qos_id
-        url += "?vol_type_id=%s" % vol_type_id
-        resp, body = self.get(url)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def disassociate_all_qos(self, qos_id):
-        """Disassociate the specified QoS with all associations."""
-        url = "qos-specs/%s/disassociate_all" % qos_id
-        resp, body = self.get(url)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
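
For reference, the QoS association lifecycle this deleted client drove looks the same against the v3 `QosSpecsClient`, which is assumed here to keep the v1 method names and response keys. A hedged sketch:

```python
def exercise_qos_lifecycle(qos_client, volume_type_id):
    """Sketch: create a QoS spec, associate it, then tear it down."""
    qos = qos_client.create_qos(name='gold', consumer='back-end',
                                read_iops_sec='2000')['qos_specs']
    try:
        qos_client.associate_qos(qos['id'], volume_type_id)
        assocs = qos_client.show_association_qos(
            qos['id'])['qos_associations']
        assert any(a['id'] == volume_type_id for a in assocs)
    finally:
        qos_client.disassociate_all_qos(qos['id'])
        qos_client.delete_qos(qos['id'], force=True)
```
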
diff --git a/tempest/lib/services/volume/v1/quotas_client.py b/tempest/lib/services/volume/v1/quotas_client.py
deleted file mode 100644
index 7f191ca..0000000
--- a/tempest/lib/services/volume/v1/quotas_client.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-
-
-class QuotasClient(rest_client.RestClient):
-    """Client class to send CRUD Volume Quotas API V1 requests"""
-
-    def show_default_quota_set(self, tenant_id):
-        """List the default volume quota set for a tenant."""
-
-        url = 'os-quota-sets/%s/defaults' % tenant_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = jsonutils.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_quota_set(self, tenant_id, params=None):
-        """List the quota set for a tenant."""
-
-        url = 'os-quota-sets/%s' % tenant_id
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = jsonutils.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_quota_set(self, tenant_id, **kwargs):
-        """Updates quota set
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-quotas
-        """
-        put_body = jsonutils.dumps({'quota_set': kwargs})
-        resp, body = self.put('os-quota-sets/%s' % tenant_id, put_body)
-        self.expected_success(200, resp.status)
-        body = jsonutils.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_quota_set(self, tenant_id):
-        """Delete the tenant's quota set."""
-        resp, body = self.delete('os-quota-sets/%s' % tenant_id)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
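
The quota workflow removed here survives unchanged in the v3 `QuotasClient`; method-name parity is an assumption. A short sketch of the read/update/reset cycle:

```python
def double_volume_quota(quotas_client, project_id):
    """Sketch: read, bump and then reset a project's volume quota."""
    current = quotas_client.show_quota_set(project_id)['quota_set']
    quotas_client.update_quota_set(project_id,
                                   volumes=current['volumes'] * 2)
    # Deleting the quota set reverts the project to the defaults.
    quotas_client.delete_quota_set(project_id)
```
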
diff --git a/tempest/lib/services/volume/v1/services_client.py b/tempest/lib/services/volume/v1/services_client.py
deleted file mode 100644
index d438a34..0000000
--- a/tempest/lib/services/volume/v1/services_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2014 NEC Corporation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-
-
-class ServicesClient(rest_client.RestClient):
-    """Volume V1 volume services client"""
-
-    def list_services(self, **params):
-        url = 'os-services'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v1/snapshots_client.py b/tempest/lib/services/volume/v1/snapshots_client.py
deleted file mode 100644
index 7dfdcf2..0000000
--- a/tempest/lib/services/volume/v1/snapshots_client.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class SnapshotsClient(rest_client.RestClient):
-    """Client class to send CRUD Volume V1 API requests."""
-
-    create_resp = 200
-
-    def list_snapshots(self, detail=False, **params):
-        """List all the snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-snapshots
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-snapshots-with-details
-        """
-        url = 'snapshots'
-        if detail:
-            url += '/detail'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_snapshot(self, snapshot_id):
-        """Returns the details of a single snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#show-snapshot-details
-        """
-        url = "snapshots/%s" % snapshot_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_snapshot(self, **kwargs):
-        """Creates a new snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-snapshot
-        """
-        post_body = json.dumps({'snapshot': kwargs})
-        resp, body = self.post('snapshots', post_body)
-        body = json.loads(body)
-        self.expected_success(self.create_resp, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_snapshot(self, snapshot_id):
-        """Delete Snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#delete-snapshot
-        """
-        resp, body = self.delete("snapshots/%s" % snapshot_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_snapshot(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'volume-snapshot'
-
-    def reset_snapshot_status(self, snapshot_id, status):
-        """Reset the specified snapshot's status."""
-        post_body = json.dumps({'os-reset_status': {"status": status}})
-        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_snapshot_status(self, snapshot_id, **kwargs):
-        """Update the specified snapshot's status."""
-        # TODO(gmann): api-site doesn't contain doc ref
-        # for this API. After fixing the api-site, we need to
-        # add the link here.
-        # Bug https://bugs.launchpad.net/openstack-api-site/+bug/1532645
-
-        post_body = json.dumps({'os-update_snapshot_status': kwargs})
-        url = 'snapshots/%s/action' % snapshot_id
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_snapshot_metadata(self, snapshot_id, metadata):
-        """Create metadata for the snapshot."""
-        put_body = json.dumps({'metadata': metadata})
-        url = "snapshots/%s/metadata" % snapshot_id
-        resp, body = self.post(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_snapshot(self, snapshot_id, **kwargs):
-        """Updates a snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-snapshot
-        """
-        put_body = json.dumps({'snapshot': kwargs})
-        resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_snapshot_metadata(self, snapshot_id):
-        """Get metadata of the snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#show-snapshot-metadata
-        """
-        url = "snapshots/%s/metadata" % snapshot_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_snapshot_metadata(self, snapshot_id, **kwargs):
-        """Update metadata for the snapshot.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-snapshot-metadata
-        """
-        put_body = json.dumps(kwargs)
-        url = "snapshots/%s/metadata" % snapshot_id
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_snapshot_metadata_item(self, snapshot_id, id, **kwargs):
-        """Update metadata item for the snapshot."""
-        # TODO(piyush): Current api-site doesn't contain this API description.
-        # After fixing the api-site, we need to fix here also for putting the
-        # link to api-site.
-        # LP: https://bugs.launchpad.net/openstack-api-site/+bug/1529064
-        put_body = json.dumps(kwargs)
-        url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_snapshot_metadata_item(self, snapshot_id, id):
-        """Delete metadata item for the snapshot."""
-        url = "snapshots/%s/metadata/%s" % (snapshot_id, id)
-        resp, body = self.delete(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def force_delete_snapshot(self, snapshot_id):
-        """Force Delete Snapshot."""
-        post_body = json.dumps({'os-force_delete': {}})
-        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
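
A brief sketch of the snapshot-plus-metadata flow these deleted methods covered, written against the v3 `SnapshotsClient` on the assumption that it keeps the same method names (note v2/v3 use `name` rather than the v1 `display_name` field):

```python
def snapshot_with_metadata(snapshots_client, volume_id):
    """Sketch: create a snapshot, attach metadata, then delete it."""
    snap = snapshots_client.create_snapshot(
        volume_id=volume_id, name='nightly', force=True)['snapshot']
    snapshots_client.create_snapshot_metadata(snap['id'],
                                              {'retention': '7d'})
    meta = snapshots_client.show_snapshot_metadata(snap['id'])['metadata']
    assert meta['retention'] == '7d'
    snapshots_client.delete_snapshot(snap['id'])
```
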
diff --git a/tempest/lib/services/volume/v1/types_client.py b/tempest/lib/services/volume/v1/types_client.py
deleted file mode 100644
index d434e65..0000000
--- a/tempest/lib/services/volume/v1/types_client.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class TypesClient(rest_client.RestClient):
-    """Client class to send CRUD Volume Types API requests"""
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_volume_type(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'volume-type'
-
-    def list_volume_types(self, **params):
-        """List all the volume_types created.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-all-volume-types-for-v2
-        """
-        url = 'types'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume_type(self, volume_type_id):
-        """Returns the details of a single volume_type.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#show-volume-type-details-for-v2
-        """
-        url = "types/%s" % volume_type_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume_type(self, **kwargs):
-        """Create volume type.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-volume-type-for-v2
-        """
-        post_body = json.dumps({'volume_type': kwargs})
-        resp, body = self.post('types', post_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume_type(self, volume_type_id):
-        """Deletes the Specified Volume_type.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#delete-volume-type
-        """
-        resp, body = self.delete("types/%s" % volume_type_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_volume_types_extra_specs(self, volume_type_id, **params):
-        """List all the volume_types extra specs created.
-
-        TODO: Current api-site doesn't contain this API description.
-        After fixing the api-site, we need to fix here also for putting
-        the link to api-site.
-        """
-        url = 'types/%s/extra_specs' % volume_type_id
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume_type_extra_specs(self, volume_type_id, extra_specs_name):
-        """Returns the details of a single volume_type extra spec."""
-        url = "types/%s/extra_specs/%s" % (volume_type_id, extra_specs_name)
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume_type_extra_specs(self, volume_type_id, extra_specs):
-        """Creates a new Volume_type extra spec.
-
-        volume_type_id: Id of volume_type.
-        extra_specs: A dictionary of values to be used as extra_specs.
-        """
-        url = "types/%s/extra_specs" % volume_type_id
-        post_body = json.dumps({'extra_specs': extra_specs})
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume_type_extra_specs(self, volume_type_id, extra_spec_name):
-        """Deletes the Specified Volume_type extra spec."""
-        resp, body = self.delete("types/%s/extra_specs/%s" % (
-            volume_type_id, extra_spec_name))
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_type(self, volume_type_id, **kwargs):
-        """Updates volume type name, description, and/or is_public.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-volume-type
-        """
-        put_body = json.dumps({'volume_type': kwargs})
-        resp, body = self.put('types/%s' % volume_type_id, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_type_extra_specs(self, volume_type_id, extra_spec_name,
-                                       extra_specs):
-        """Update a volume_type extra spec.
-
-        :param volume_type_id: Id of volume_type.
-        :param extra_spec_name: Name of the extra spec to be updated.
-        :param extra_specs: A dictionary with extra_spec_name as the key
-                            and the updated value.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-extra-specs-for-a-volume-type
-        """
-        url = "types/%s/extra_specs/%s" % (volume_type_id, extra_spec_name)
-        put_body = json.dumps(extra_specs)
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
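
The volume-type and extra-spec calls removed here have direct v3 counterparts; assuming method-name parity, a minimal sketch of creating a type and pinning a backend name on it:

```python
def create_ssd_type(types_client):
    """Sketch: create a volume type and set one extra spec on it."""
    vtype = types_client.create_volume_type(name='ssd')['volume_type']
    types_client.create_volume_type_extra_specs(
        vtype['id'], {'volume_backend_name': 'ssd-backend'})
    specs = types_client.list_volume_types_extra_specs(
        vtype['id'])['extra_specs']
    assert specs['volume_backend_name'] == 'ssd-backend'
    return vtype['id']
```
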
diff --git a/tempest/lib/services/volume/v1/volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
deleted file mode 100644
index 4ed5eb1..0000000
--- a/tempest/lib/services/volume/v1/volumes_client.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_serialization import jsonutils as json
-import six
-from six.moves.urllib import parse as urllib
-
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class VolumesClient(rest_client.RestClient):
-    """Client class to send CRUD Volume V1 API requests"""
-
-    def _prepare_params(self, params):
-        """Prepares params for use in get or _ext_get methods.
-
-        If params is a string it will be left as it is, but if it's not it will
-        be urlencoded.
-        """
-        if isinstance(params, six.string_types):
-            return params
-        return urllib.urlencode(params)
-
-    def list_volumes(self, detail=False, params=None):
-        """List all the volumes created.
-
-        Params can be a string (must be urlencoded) or a dictionary.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-volumes
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-volumes-with-details
-        """
-        url = 'volumes'
-        if detail:
-            url += '/detail'
-        if params:
-            url += '?%s' % self._prepare_params(params)
-
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume(self, volume_id):
-        """Returns the details of a single volume."""
-        url = "volumes/%s" % volume_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume(self, **kwargs):
-        """Creates a new Volume.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-volume
-        """
-        post_body = json.dumps({'volume': kwargs})
-        resp, body = self.post('volumes', post_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume(self, volume_id, **kwargs):
-        """Updates the Specified Volume.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#update-volume
-        """
-        put_body = json.dumps({'volume': kwargs})
-        resp, body = self.put('volumes/%s' % volume_id, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume(self, volume_id):
-        """Deletes the Specified Volume."""
-        resp, body = self.delete("volumes/%s" % volume_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def upload_volume(self, volume_id, **kwargs):
-        """Uploads a volume in Glance."""
-        post_body = json.dumps({'os-volume_upload_image': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def attach_volume(self, volume_id, **kwargs):
-        """Attaches a volume to a given instance on a given mountpoint.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#attach-volume-to-server
-        """
-        post_body = json.dumps({'os-attach': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def set_bootable_volume(self, volume_id, **kwargs):
-        """set a bootable flag for a volume - true or false."""
-        post_body = json.dumps({'os-set_bootable': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def detach_volume(self, volume_id):
-        """Detaches a volume from an instance."""
-        post_body = json.dumps({'os-detach': {}})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def reserve_volume(self, volume_id):
-        """Reserves a volume."""
-        post_body = json.dumps({'os-reserve': {}})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def unreserve_volume(self, volume_id):
-        """Restore a reserved volume ."""
-        post_body = json.dumps({'os-unreserve': {}})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def is_resource_deleted(self, id):
-        try:
-            self.show_volume(id)
-        except lib_exc.NotFound:
-            return True
-        return False
-
-    @property
-    def resource_type(self):
-        """Returns the primary type of resource this client works with."""
-        return 'volume'
-
-    def extend_volume(self, volume_id, **kwargs):
-        """Extend a volume.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#extend-volume-size
-        """
-        post_body = json.dumps({'os-extend': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def reset_volume_status(self, volume_id, **kwargs):
-        """Reset the Specified Volume's Status.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#reset-volume-statuses
-        """
-        post_body = json.dumps({'os-reset_status': kwargs})
-        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume_transfer(self, **kwargs):
-        """Create a volume transfer.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#create-volume-transfer
-        """
-        post_body = json.dumps({'transfer': kwargs})
-        resp, body = self.post('os-volume-transfer', post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume_transfer(self, transfer_id):
-        """Returns the details of a volume transfer."""
-        url = "os-volume-transfer/%s" % transfer_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_volume_transfers(self, **params):
-        """List all the volume transfers created.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#list-volume-transfers
-        """
-        url = 'os-volume-transfer'
-        if params:
-            url += '?%s' % urllib.urlencode(params)
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume_transfer(self, transfer_id):
-        """Delete a volume transfer."""
-        resp, body = self.delete("os-volume-transfer/%s" % transfer_id)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def accept_volume_transfer(self, transfer_id, **kwargs):
-        """Accept a volume transfer.
-
-        For a full list of available parameters, please refer to the official
-        API reference:
-        https://docs.openstack.org/api-ref/block-storage/v2/#accept-volume-transfer
-        """
-        url = 'os-volume-transfer/%s/accept' % transfer_id
-        post_body = json.dumps({'accept': kwargs})
-        resp, body = self.post(url, post_body)
-        body = json.loads(body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_readonly(self, volume_id, **kwargs):
-        """Update the Specified Volume readonly."""
-        post_body = json.dumps({'os-update_readonly_flag': kwargs})
-        url = 'volumes/%s/action' % (volume_id)
-        resp, body = self.post(url, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def force_delete_volume(self, volume_id):
-        """Force Delete Volume."""
-        post_body = json.dumps({'os-force_delete': {}})
-        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_volume_metadata(self, volume_id, metadata):
-        """Create metadata for the volume."""
-        put_body = json.dumps({'metadata': metadata})
-        url = "volumes/%s/metadata" % volume_id
-        resp, body = self.post(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_volume_metadata(self, volume_id):
-        """Get metadata of the volume."""
-        url = "volumes/%s/metadata" % volume_id
-        resp, body = self.get(url)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_metadata(self, volume_id, metadata):
-        """Update metadata for the volume."""
-        put_body = json.dumps({'metadata': metadata})
-        url = "volumes/%s/metadata" % volume_id
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_volume_metadata_item(self, volume_id, id, meta_item):
-        """Update metadata item for the volume."""
-        put_body = json.dumps({'meta': meta_item})
-        url = "volumes/%s/metadata/%s" % (volume_id, id)
-        resp, body = self.put(url, put_body)
-        body = json.loads(body)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_volume_metadata_item(self, volume_id, id):
-        """Delete metadata item for the volume."""
-        url = "volumes/%s/metadata/%s" % (volume_id, id)
-        resp, body = self.delete(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def retype_volume(self, volume_id, **kwargs):
-        """Updates volume with new volume type."""
-        post_body = json.dumps({'os-retype': kwargs})
-        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
-        self.expected_success(202, resp.status)
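
Among the removed volume actions, the bootable/upload pair is a common combination. A hedged sketch against the v3 `VolumesClient`, assuming the same method names; the image name is illustrative:

```python
def publish_volume_as_image(volumes_client, volume_id):
    """Sketch: mark a volume bootable and upload it to Glance."""
    volumes_client.set_bootable_volume(volume_id, bootable=True)
    body = volumes_client.upload_volume(
        volume_id, image_name='vol-%s-image' % volume_id,
        disk_format='raw')['os-volume_upload_image']
    return body['image_id']
```
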
diff --git a/tempest/lib/services/volume/v2/__init__.py b/tempest/lib/services/volume/v2/__init__.py
index 68982d9..756a41a 100644
--- a/tempest/lib/services/volume/v2/__init__.py
+++ b/tempest/lib/services/volume/v2/__init__.py
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+import warnings
+
 from tempest.lib.services.volume.v2.availability_zone_client \
     import AvailabilityZoneClient
 from tempest.lib.services.volume.v2.backups_client import BackupsClient
@@ -44,3 +46,9 @@
            'LimitsClient', 'CapabilitiesClient', 'SchedulerStatsClient',
            'SnapshotManageClient', 'VolumeManageClient', 'TransfersClient',
            'QuotaClassesClient']
+
+warnings.warn(
+    "The tempest.lib.services.volume.v2 module (volume v2 APIs service "
+    "clients) is deprecated in favor of tempest.lib.services.volume.v3 "
+    "(volume v3 APIs service clients) and will be removed once Tempest stop "
+    "supporting stable wallaby.", DeprecationWarning)
diff --git a/tempest/lib/services/volume/v3/__init__.py b/tempest/lib/services/volume/v3/__init__.py
index e2fa836..039640b 100644
--- a/tempest/lib/services/volume/v3/__init__.py
+++ b/tempest/lib/services/volume/v3/__init__.py
@@ -39,6 +39,7 @@
     SnapshotManageClient
 from tempest.lib.services.volume.v3.snapshots_client import SnapshotsClient
 from tempest.lib.services.volume.v3.transfers_client import TransfersClient
+from tempest.lib.services.volume.v3.transfers_client import TransfersV355Client
 from tempest.lib.services.volume.v3.types_client import TypesClient
 from tempest.lib.services.volume.v3.versions_client import VersionsClient
 from tempest.lib.services.volume.v3.volume_manage_client import \
@@ -50,5 +51,6 @@
            'GroupsClient', 'HostsClient', 'LimitsClient', 'MessagesClient',
            'QosSpecsClient', 'QuotaClassesClient', 'QuotasClient',
            'SchedulerStatsClient', 'ServicesClient', 'SnapshotManageClient',
-           'SnapshotsClient', 'TransfersClient', 'TypesClient',
-           'VersionsClient', 'VolumeManageClient', 'VolumesClient']
+           'SnapshotsClient', 'TransfersClient', 'TransfersV355Client',
+           'TypesClient', 'VersionsClient', 'VolumeManageClient',
+           'VolumesClient']
diff --git a/tempest/lib/services/volume/v3/backups_client.py b/tempest/lib/services/volume/v3/backups_client.py
index 1df45fa..4bf7ffb 100644
--- a/tempest/lib/services/volume/v3/backups_client.py
+++ b/tempest/lib/services/volume/v3/backups_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import backups as schema
 from tempest.lib.common import rest_client
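
This hunk and the following ones make the same mechanical swap: the `six.moves.urllib` shim is replaced by the standard-library `urllib.parse`, which is equivalent on Python 3. A minimal illustration of the aliased import the clients now rely on:

```python
# Python 3 only: the six shim is gone; the stdlib module is aliased so the
# existing "urllib.urlencode(...)" call sites stay unchanged.
from urllib import parse as urllib

query = urllib.urlencode({'sort_key': 'name', 'limit': 5})
# Dict insertion order is preserved on Python 3.7+.
assert query == 'sort_key=name&limit=5'
```
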
diff --git a/tempest/lib/services/volume/v3/group_snapshots_client.py b/tempest/lib/services/volume/v3/group_snapshots_client.py
index 4051c06..0f36fc9 100644
--- a/tempest/lib/services/volume/v3/group_snapshots_client.py
+++ b/tempest/lib/services/volume/v3/group_snapshots_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import group_snapshots as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/volume/v3/group_types_client.py b/tempest/lib/services/volume/v3/group_types_client.py
index 1dcd508..9de36f4 100644
--- a/tempest/lib/services/volume/v3/group_types_client.py
+++ b/tempest/lib/services/volume/v3/group_types_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import group_types as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/volume/v3/groups_client.py b/tempest/lib/services/volume/v3/groups_client.py
index 3d8523d..d1500cf 100644
--- a/tempest/lib/services/volume/v3/groups_client.py
+++ b/tempest/lib/services/volume/v3/groups_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import groups as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/volume/v3/hosts_client.py b/tempest/lib/services/volume/v3/hosts_client.py
index 019a852..9c64659 100644
--- a/tempest/lib/services/volume/v3/hosts_client.py
+++ b/tempest/lib/services/volume/v3/hosts_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import hosts as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/volume/v3/quotas_client.py b/tempest/lib/services/volume/v3/quotas_client.py
index 5b1a52c..3f4c4e1 100644
--- a/tempest/lib/services/volume/v3/quotas_client.py
+++ b/tempest/lib/services/volume/v3/quotas_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import quotas as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/volume/v3/services_client.py b/tempest/lib/services/volume/v3/services_client.py
index 4ba01d7..1111f81 100644
--- a/tempest/lib/services/volume/v3/services_client.py
+++ b/tempest/lib/services/volume/v3/services_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import services as schema
 from tempest.lib.api_schema.response.volume.v3_7 import services as schemav37
diff --git a/tempest/lib/services/volume/v3/snapshots_client.py b/tempest/lib/services/volume/v3/snapshots_client.py
index 8ca2044..ae31ee9 100644
--- a/tempest/lib/services/volume/v3/snapshots_client.py
+++ b/tempest/lib/services/volume/v3/snapshots_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import snapshots as schema
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/volume/v3/transfers_client.py b/tempest/lib/services/volume/v3/transfers_client.py
index f572f95..cc4e1b2 100644
--- a/tempest/lib/services/volume/v3/transfers_client.py
+++ b/tempest/lib/services/volume/v3/transfers_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import transfers as schema
 from tempest.lib.common import rest_client
@@ -23,6 +24,8 @@
 class TransfersClient(rest_client.RestClient):
     """Client class to send CRUD Volume Transfer API requests"""
 
+    resource_path = 'os-volume-transfer'
+
     def create_volume_transfer(self, **kwargs):
         """Create a volume transfer.
 
@@ -31,14 +34,14 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume-transfer
         """
         post_body = json.dumps({'transfer': kwargs})
-        resp, body = self.post('os-volume-transfer', post_body)
+        resp, body = self.post(self.resource_path, post_body)
         body = json.loads(body)
         self.validate_response(schema.create_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
 
     def show_volume_transfer(self, transfer_id):
         """Returns the details of a volume transfer."""
-        url = "os-volume-transfer/%s" % transfer_id
+        url = "%s/%s" % (self.resource_path, transfer_id)
         resp, body = self.get(url)
         body = json.loads(body)
         self.validate_response(schema.show_volume_transfer, resp, body)
@@ -52,7 +55,7 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-for-a-project
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-and-details
         """
-        url = 'os-volume-transfer'
+        url = self.resource_path
         schema_list_transfers = schema.list_volume_transfers_no_detail
         if detail:
             url += '/detail'
@@ -66,7 +69,7 @@
 
     def delete_volume_transfer(self, transfer_id):
         """Delete a volume transfer."""
-        resp, body = self.delete("os-volume-transfer/%s" % transfer_id)
+        resp, body = self.delete("%s/%s" % (self.resource_path, transfer_id))
         self.validate_response(schema.delete_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -77,9 +80,14 @@
         API reference:
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#accept-a-volume-transfer
         """
-        url = 'os-volume-transfer/%s/accept' % transfer_id
+        url = '%s/%s/accept' % (self.resource_path, transfer_id)
         post_body = json.dumps({'accept': kwargs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         self.validate_response(schema.accept_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
+
+
+class TransfersV355Client(TransfersClient):
+    """Client class to send CRUD for the "new" Transfers API (mv 3.55)"""
+    resource_path = 'volume-transfers'
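
TransfersV355Client reuses every request method of TransfersClient and only swaps the URL prefix, so the same calls target the microversion 3.55 volume-transfers endpoints instead of os-volume-transfer. A self-contained sketch of that pattern (simplified stand-ins, not the Tempest classes themselves):

    class TransfersClient:
        # base client targets the pre-3.55 endpoint prefix
        resource_path = 'os-volume-transfer'

        def show_url(self, transfer_id):
            return '%s/%s' % (self.resource_path, transfer_id)


    class TransfersV355Client(TransfersClient):
        # the subclass only overrides the prefix; inherited methods pick it up
        resource_path = 'volume-transfers'


    assert TransfersClient().show_url('abc') == 'os-volume-transfer/abc'
    assert TransfersV355Client().show_url('abc') == 'volume-transfers/abc'
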
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 7fa24a4..9858d87 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -13,8 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import volume_types as schema
 from tempest.lib.common import rest_client
@@ -65,6 +66,19 @@
         self.validate_response(schema.show_volume_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
+    def show_default_volume_type(self):
+        """Returns the details of a single volume type.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/block-storage/v3/index.html#show-default-volume-type
+        """
+        url = "types/default"
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(schema.show_volume_type, resp, body)
+        return rest_client.ResponseBody(resp, body)
+
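
At the HTTP level the new method is a plain GET on types/default under the volume endpoint. A rough illustration of that request, assuming a volume endpoint URL and token are already available from an existing session (the requests-based helper is illustrative only, not Tempest code):

    import requests

    def show_default_volume_type(endpoint, token):
        # GET <volume endpoint>/types/default, mirroring the client method
        # above; endpoint and token are assumed inputs obtained elsewhere.
        resp = requests.get('%s/types/default' % endpoint,
                            headers={'X-Auth-Token': token})
        resp.raise_for_status()
        return resp.json()['volume_type']
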
     def create_volume_type(self, **kwargs):
         """Create volume type.
 
diff --git a/tempest/lib/services/volume/v3/versions_client.py b/tempest/lib/services/volume/v3/versions_client.py
index 4ac4112..0bed827 100644
--- a/tempest/lib/services/volume/v3/versions_client.py
+++ b/tempest/lib/services/volume/v3/versions_client.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from six.moves.urllib.parse import urljoin
+from urllib.parse import urljoin
 
 from oslo_serialization import jsonutils as json
 
diff --git a/tempest/lib/services/volume/v3/volume_manage_client.py b/tempest/lib/services/volume/v3/volume_manage_client.py
index 85b1b82..f6642c5 100644
--- a/tempest/lib/services/volume/v3/volume_manage_client.py
+++ b/tempest/lib/services/volume/v3/volume_manage_client.py
@@ -15,6 +15,7 @@
 
 from oslo_serialization import jsonutils as json
 
+from tempest.lib.api_schema.response.volume import manage_volume as schema
 from tempest.lib.common import rest_client
 
 
@@ -30,6 +31,6 @@
         """
         post_body = json.dumps({'volume': kwargs})
         resp, body = self.post('os-volume-manage', post_body)
-        self.expected_success(202, resp.status)
         body = json.loads(body)
+        self.validate_response(schema.manage_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
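
Replacing the bare 202 status check with validate_response means the status code and the response body are both checked against a schema. A rough standalone sketch of the idea (the real schema lives in tempest/lib/api_schema/response/volume/manage_volume.py; the dictionary below is illustrative only):

    import jsonschema

    manage_volume = {
        'status_code': [202],
        'response_body': {
            'type': 'object',
            'properties': {'volume': {'type': 'object'}},
            'required': ['volume'],
        },
    }

    body = {'volume': {'id': 'vol-1', 'status': 'creating'}}
    assert 202 in manage_volume['status_code']
    # raises jsonschema.ValidationError if the body does not match
    jsonschema.validate(body, manage_volume['response_body'])
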
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index b8535d8..9c6fe68 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -13,9 +13,9 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
-import six
-from six.moves.urllib import parse as urllib
 
 from tempest.lib.api_schema.response.volume import volumes as schema
 from tempest.lib.common import rest_client
@@ -32,7 +32,7 @@
         If params is a string it will be left as it is, but if it's not it will
         be urlencoded.
         """
-        if isinstance(params, six.string_types):
+        if isinstance(params, str):
             return params
         return urllib.urlencode(params)
 
diff --git a/tempest/manager.py b/tempest/manager.py
deleted file mode 100644
index b485ef2..0000000
--- a/tempest/manager.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from oslo_log import log as logging
-
-from tempest import clients as tempest_clients
-from tempest import config
-from tempest.lib.services import clients
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-class Manager(clients.ServiceClients):
-    """Service client manager class for backward compatibility
-
-    The former manager.Manager is not a stable interface in Tempest,
-    nonetheless it is consumed by a number of plugins already. This class
-    exists to provide some grace time for the move to tempest.lib.
-    """
-
-    def __init__(self, credentials, scope='project'):
-        msg = ("tempest.manager.Manager is not a stable interface and as such "
-               "it should not be imported directly. It will be removed as "
-               "soon as the client manager becomes available in tempest.lib.")
-        LOG.warning(msg)
-        dscv = CONF.identity.disable_ssl_certificate_validation
-        _, uri = tempest_clients.get_auth_provider_class(credentials)
-        super(Manager, self).__init__(
-            credentials=credentials, scope=scope,
-            identity_uri=uri,
-            disable_ssl_certificate_validation=dscv,
-            ca_certs=CONF.identity.ca_certificates_file,
-            trace_requests=CONF.debug.trace_requests)
-
-
-def get_auth_provider(credentials, pre_auth=False, scope='project'):
-    """Shim to get_auth_provider in clients.py
-
-    get_auth_provider used to be hosted in this module, but it has been
-    moved to clients.py now as a more permanent location.
-    This module will be removed eventually, and this shim is only
-    maintained for the benefit of plugins already consuming this interface.
-    """
-    msg = ("tempest.manager.get_auth_provider is not a stable interface and "
-           "as such it should not imported directly. It will be removed as "
-           "the client manager becomes available in tempest.lib.")
-    LOG.warning(msg)
-    return tempest_clients.get_auth_provider(credentials=credentials,
-                                             pre_auth=pre_auth, scope=scope)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index ff860d5..39021d5 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -18,6 +18,7 @@
 import subprocess
 
 import netaddr
+
 from oslo_log import log
 from oslo_serialization import jsonutils as json
 from oslo_utils import netutils
@@ -46,7 +47,7 @@
 class ScenarioTest(tempest.test.BaseTestCase):
     """Base class for scenario tests. Uses tempest own clients. """
 
-    credentials = ['primary']
+    credentials = ['primary', 'admin']
 
     compute_min_microversion = None
     compute_max_microversion = LATEST_MICROVERSION
@@ -92,15 +93,12 @@
             placement_microversion=self.placement_request_microversion))
 
     def setup_compute_client(cls):
-        """Compute and Compute security groups client"""
+        """Compute client"""
         cls.compute_images_client = cls.os_primary.compute_images_client
         cls.keypairs_client = cls.os_primary.keypairs_client
-        cls.compute_security_groups_client = (
-            cls.os_primary.compute_security_groups_client)
-        cls.compute_security_group_rules_client = (
-            cls.os_primary.compute_security_group_rules_client)
         cls.servers_client = cls.os_primary.servers_client
         cls.interface_client = cls.os_primary.interfaces_client
+        cls.flavors_client = cls.os_primary.flavors_client
 
     def setup_network_client(cls):
         """Neutron network client"""
@@ -117,9 +115,6 @@
     def setup_clients(cls):
         """This setup the service clients for the tests"""
         super(ScenarioTest, cls).setup_clients()
-        cls.flavors_client = cls.os_primary.flavors_client
-        cls.compute_floating_ips_client = (
-            cls.os_primary.compute_floating_ips_client)
         if CONF.service_available.glance:
             # Check if glance v1 is available to determine which client to use.
             if CONF.image_feature_enabled.api_v1:
@@ -143,10 +138,20 @@
     # resp part which is not used in scenario tests
 
     def create_port(self, network_id, client=None, **kwargs):
-        """Creates port"""
+        """Creates port for the respective network_id
+
+        :param network_id: the id of the network
+        :param client: the client to use, defaults to self.ports_client
+        :param kwargs: additional arguments such as:
+            - namestart - a string to generate a name for the port from,
+              defaults to self.__class__.__name__
+            - 'binding:vnic_type' - defaults to CONF.network.port_vnic_type
+            - 'binding:profile' - defaults to CONF.network.port_profile
+        """
         if not client:
             client = self.ports_client
-        name = data_utils.rand_name(self.__class__.__name__)
+        name = data_utils.rand_name(
+            kwargs.pop('namestart', self.__class__.__name__))
         if CONF.network.port_vnic_type and 'binding:vnic_type' not in kwargs:
             kwargs['binding:vnic_type'] = CONF.network.port_vnic_type
         if CONF.network.port_profile and 'binding:profile' not in kwargs:
@@ -155,12 +160,13 @@
             name=name,
             network_id=network_id,
             **kwargs)
+        self.assertIsNotNone(result, 'Unable to allocate port')
         port = result['port']
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         client.delete_port, port['id'])
         return port
 
-    def create_keypair(self, client=None):
+    def create_keypair(self, client=None, **kwargs):
         """Creates keypair
 
         Keypair is a public key of OpenSSH key pair used for accessing
@@ -170,10 +176,11 @@
         """
         if not client:
             client = self.keypairs_client
-        name = data_utils.rand_name(self.__class__.__name__)
+        if not kwargs.get('name'):
+            kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
         # We don't need to create a keypair by pubkey in scenario
-        body = client.create_keypair(name=name)
-        self.addCleanup(client.delete_keypair, name)
+        body = client.create_keypair(**kwargs)
+        self.addCleanup(client.delete_keypair, kwargs['name'])
         return body['keypair']
 
     def create_server(self, name=None, image_id=None, flavor=None,
@@ -199,6 +206,14 @@
                 direct: an SR-IOV port that is directly attached to a VM
                 macvtap: an SR-IOV port that is attached to a VM via a macvtap
                          device.
+                direct-physical: an SR-IOV port that is directly attached to a
+                                 VM using physical instead of virtual
+                                 functions.
+                baremetal: a baremetal port directly attached to a baremetal
+                           node.
+                virtio-forwarder:  an SR-IOV port that is indirectly attached
+                                   to a VM using a low-latency vhost-user
+                                   forwarding process.
               Defaults to ``CONF.network.port_vnic_type``.
             * *port_profile* (``dict``) --
               This attribute is a dictionary that can be used (with admin
@@ -206,6 +221,9 @@
               the port.
               example: port_profile = "capabilities:[switchdev]"
               Defaults to ``CONF.network.port_profile``.
+            * *create_port_body* (``dict``) --
+              This attribute is a dictionary of additional arguments to be
+              passed to create_port method.
         """
 
         # NOTE(jlanoux): As a first step, ssh checks in the scenario
@@ -231,7 +249,7 @@
         # every network
         if vnic_type or profile:
             ports = []
-            create_port_body = {}
+            create_port_body = kwargs.pop('create_port_body', {})
 
             if vnic_type:
                 create_port_body['binding:vnic_type'] = vnic_type
@@ -306,7 +324,7 @@
         return server
 
     def create_volume(self, size=None, name=None, snapshot_id=None,
-                      imageRef=None, volume_type=None):
+                      imageRef=None, volume_type=None, **kwargs):
         """Creates volume
 
         This wrapper utility creates volume and waits for volume to be
@@ -326,11 +344,11 @@
             size = max(size, min_disk)
         if name is None:
             name = data_utils.rand_name(self.__class__.__name__ + "-volume")
-        kwargs = {'display_name': name,
-                  'snapshot_id': snapshot_id,
-                  'imageRef': imageRef,
-                  'volume_type': volume_type,
-                  'size': size}
+        kwargs.update({'name': name,
+                       'snapshot_id': snapshot_id,
+                       'imageRef': imageRef,
+                       'volume_type': volume_type,
+                       'size': size})
 
         if CONF.compute.compute_volume_common_az:
             kwargs.setdefault('availability_zone',
@@ -352,21 +370,35 @@
 
     def create_backup(self, volume_id, name=None, description=None,
                       force=False, snapshot_id=None, incremental=False,
-                      container=None):
-        """Creates backup
+                      container=None, **kwargs):
+        """Creates a backup of the given volume_id or snapshot_id
 
-        This wrapper utility creates backup and waits for backup to be
-        in 'available' state.
+        This wrapper utility creates a backup and waits until it is in
+        'available' state.
+
+        :param volume_id: UUID of the volume to back up
+        :param name: backup name, '$classname-backup' by default
+        :param description: Description of the backup, None by default
+        :param force: boolean, whether to back up even if the volume is
+            attached; False by default
+        :param snapshot_id: UUID of the source snapshot to back up
+            None by default
+        :param incremental: boolean, False by default
+        :param container: a container name, None by default
+        :param **kwargs: additional parameters per the documentation:
+            https://docs.openstack.org/api-ref/block-storage/v3/
+            #create-a-backup
         """
 
         name = name or data_utils.rand_name(
             self.__class__.__name__ + "-backup")
-        kwargs = {'name': name,
-                  'description': description,
-                  'force': force,
-                  'snapshot_id': snapshot_id,
-                  'incremental': incremental,
-                  'container': container}
+        args = {'name': name,
+                'description': description,
+                'force': force,
+                'snapshot_id': snapshot_id,
+                'incremental': incremental,
+                'container': container}
+        args.update(kwargs)
-        backup = self.backups_client.create_backup(volume_id=volume_id,
-                                                   **kwargs)['backup']
+        backup = self.backups_client.create_backup(volume_id=volume_id,
+                                                   **args)['backup']
         self.addCleanup(self.backups_client.delete_backup, backup['id'])
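
The explicit create_backup parameters now just seed a defaults dictionary that any extra kwargs can extend or override before the request is sent. A small standalone sketch of that merge order (illustrative names, not Tempest code):

    def merge_backup_args(name, description=None, force=False, **kwargs):
        # explicit parameters provide defaults; caller-supplied kwargs win
        args = {'name': name, 'description': description, 'force': force}
        args.update(kwargs)
        return args

    print(merge_backup_args('b1', container='c1'))
    # {'name': 'b1', 'description': None, 'force': False, 'container': 'c1'}
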
@@ -374,14 +406,20 @@
                                                 backup['id'], 'available')
         return backup
 
-    def restore_backup(self, backup_id):
-        """Restore backup
+    def restore_backup(self, backup_id, **kwargs):
+        """Restores a backup given by the backup_id
 
-        This wrapper utility restores backup and waits for backup to be
-        in 'available' state.
+        This wrapper utility restores a backup and waits until it is in
+        'available' state.
+
+        :param backup_id: UUID of a backup to restore
+        :param **kwargs: additional parameters per the documentation:
+            https://docs.openstack.org/api-ref/block-storage/v3/
+            #restore-a-backup
         """
 
-        restore = self.backups_client.restore_backup(backup_id)['restore']
+        body = self.backups_client.restore_backup(backup_id, **kwargs)
+        restore = body['restore']
         self.addCleanup(self.volumes_client.delete_volume,
                         restore['volume_id'])
         waiters.wait_for_volume_resource_status(self.backups_client,
@@ -392,29 +430,36 @@
         self.assertEqual(backup_id, restore['backup_id'])
         return restore
 
-    def rebuild_server(self, server_id, image=None,
-                       preserve_ephemeral=False, wait=True,
-                       rebuild_kwargs=None):
+    def rebuild_server(self, server_id, image=None, preserve_ephemeral=False,
+                       wait=True, **kwargs):
         if image is None:
             image = CONF.compute.image_ref
-        rebuild_kwargs = rebuild_kwargs or {}
         LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
                   server_id, image, preserve_ephemeral)
         self.servers_client.rebuild_server(
             server_id=server_id,
             image_ref=image,
             preserve_ephemeral=preserve_ephemeral,
-            **rebuild_kwargs)
+            **kwargs)
         if wait:
             waiters.wait_for_server_status(self.servers_client,
                                            server_id, 'ACTIVE')
 
     def create_volume_snapshot(self, volume_id, name=None, description=None,
-                               metadata=None, force=False):
-        """Creates volume
+                               metadata=None, force=False, **kwargs):
+        """Creates volume's snapshot
 
-        This wrapper utility creates volume snapshot and waits for backup
-        to be in 'available' state.
+        This wrapper utility creates a volume snapshot and waits until it is
+        in 'available' state.
+
+        :param volume_id: UUID of a volume to create snapshot of
+        :param name: name of the snapshot, '$classname-snapshot' by default
+        :param description: description of the snapshot
+        :param metadata: metadata key and value pairs for the snapshot
+        :param force: whether to snapshot even when the volume is attached
+        :param **kwargs: additional parameters per the doc
+            https://docs.openstack.org/api-ref/block-storage/v3/
+            #create-a-snapshot
         """
 
         name = name or data_utils.rand_name(
@@ -422,9 +467,10 @@
         snapshot = self.snapshots_client.create_snapshot(
             volume_id=volume_id,
             force=force,
-            display_name=name,
+            name=name,
             description=description,
-            metadata=metadata)['snapshot']
+            metadata=metadata,
+            **kwargs)['snapshot']
 
         self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                         snapshot['id'])
@@ -435,7 +481,7 @@
             snapshot['id'])['snapshot']
         return snapshot
 
-    def _cleanup_volume_type(self, volume_type):
+    def cleanup_volume_type(self, volume_type):
         """Clean up a given volume type.
 
         Ensuring all volumes associated to a type are first removed before
@@ -454,7 +500,8 @@
             admin_volumes_client.wait_for_resource_deletion(volume['id'])
         admin_volume_type_client.delete_volume_type(volume_type['id'])
 
-    def create_volume_type(self, client=None, name=None, backend_name=None):
+    def create_volume_type(self, client=None, name=None, backend_name=None,
+                           **kwargs):
         """Creates volume type
 
         In a multiple-storage back-end configuration,
@@ -481,69 +528,175 @@
 
         LOG.debug("Creating a volume type: %s on backend %s",
                   randomized_name, backend_name)
-        extra_specs = {}
+        extra_specs = kwargs.pop("extra_specs", {})
         if backend_name:
-            extra_specs = {"volume_backend_name": backend_name}
+            extra_specs.update({"volume_backend_name": backend_name})
 
-        volume_type = client.create_volume_type(
-            name=randomized_name, extra_specs=extra_specs)['volume_type']
+        volume_type_resp = client.create_volume_type(
+            name=randomized_name, extra_specs=extra_specs, **kwargs)
+        volume_type = volume_type_resp['volume_type']
+
         self.assertIn('id', volume_type)
-        self.addCleanup(self._cleanup_volume_type, volume_type)
+        self.addCleanup(self.cleanup_volume_type, volume_type)
         return volume_type
 
-    def _create_loginable_secgroup_rule(self, secgroup_id=None):
-        _client = self.compute_security_groups_client
-        _client_rules = self.compute_security_group_rules_client
-        if secgroup_id is None:
-            sgs = _client.list_security_groups()['security_groups']
-            for sg in sgs:
-                if sg['name'] == 'default':
-                    secgroup_id = sg['id']
-
-        # These rules are intended to permit inbound ssh and icmp
-        # traffic from all sources, so no group_id is provided.
-        # Setting a group_id would only permit traffic from ports
-        # belonging to the same security group.
-        rulesets = [
-            {
-                # ssh
-                'ip_protocol': 'tcp',
-                'from_port': 22,
-                'to_port': 22,
-                'cidr': '0.0.0.0/0',
-            },
-            {
-                # ping
-                'ip_protocol': 'icmp',
-                'from_port': -1,
-                'to_port': -1,
-                'cidr': '0.0.0.0/0',
-            }
-        ]
-        rules = list()
-        for ruleset in rulesets:
-            sg_rule = _client_rules.create_security_group_rule(
-                parent_group_id=secgroup_id, **ruleset)['security_group_rule']
-            rules.append(sg_rule)
-        return rules
-
-    def _create_security_group(self):
-        """Create security group and add rules to security group"""
-        sg_name = data_utils.rand_name(self.__class__.__name__)
-        sg_desc = sg_name + " description"
-        secgroup = self.compute_security_groups_client.create_security_group(
-            name=sg_name, description=sg_desc)['security_group']
-        self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(secgroup['description'], sg_desc)
-        self.addCleanup(
-            test_utils.call_and_ignore_notfound_exc,
-            self.compute_security_groups_client.delete_security_group,
-            secgroup['id'])
+    def create_security_group(self, security_group_rules_client=None,
+                              project_id=None,
+                              namestart='secgroup-smoke',
+                              security_groups_client=None):
+        if security_group_rules_client is None:
+            security_group_rules_client = self.security_group_rules_client
+        if security_groups_client is None:
+            security_groups_client = self.security_groups_client
+        if project_id is None:
+            project_id = security_groups_client.project_id
+        secgroup = self.create_empty_security_group(
+            namestart=namestart, client=security_groups_client,
+            project_id=project_id)
 
         # Add rules to the security group
-        self._create_loginable_secgroup_rule(secgroup['id'])
+        rules = self.create_loginable_secgroup_rule(
+            security_group_rules_client=security_group_rules_client,
+            secgroup=secgroup,
+            security_groups_client=security_groups_client)
+        for rule in rules:
+            self.assertEqual(project_id, rule['project_id'])
+            self.assertEqual(secgroup['id'], rule['security_group_id'])
         return secgroup
 
+    def create_empty_security_group(self, client=None, project_id=None,
+                                    namestart='secgroup-smoke'):
+        """Create a security group without rules.
+
+        Default rules will be created:
+         - IPv4 egress to any
+         - IPv6 egress to any
+        :param project_id: secgroup will be created in this project
+        :returns: the created security group
+        """
+
+        if client is None:
+            client = self.security_groups_client
+        if not project_id:
+            project_id = client.project_id
+        sg_name = data_utils.rand_name(namestart)
+        sg_desc = sg_name + " description"
+        sg_dict = dict(name=sg_name,
+                       description=sg_desc)
+        sg_dict['project_id'] = project_id
+        result = client.create_security_group(**sg_dict)
+
+        secgroup = result['security_group']
+        self.assertEqual(secgroup['name'], sg_name)
+        self.assertEqual(project_id, secgroup['project_id'])
+        self.assertEqual(secgroup['description'], sg_desc)
+
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        client.delete_security_group, secgroup['id'])
+        return secgroup
+
+    def create_security_group_rule(self, secgroup=None,
+                                   sec_group_rules_client=None,
+                                   project_id=None,
+                                   security_groups_client=None, **kwargs):
+        """Create a rule from a dictionary of rule parameters.
+
+        Create a rule in a secgroup. If secgroup is not defined, the default
+        secgroup in project_id will be used.
+        :param secgroup: the security group.
+        :param project_id: if secgroup not passed -- the tenant in which to
+            search for default secgroup
+        :param kwargs: a dictionary containing rule parameters:
+            for example, to allow incoming ssh:
+            rule = {
+                    'direction': 'ingress',
+                    'protocol': 'tcp',
+                    'port_range_min': 22,
+                    'port_range_max': 22,
+                    }
+        """
+
+        if sec_group_rules_client is None:
+            sec_group_rules_client = self.security_group_rules_client
+        if security_groups_client is None:
+            security_groups_client = self.security_groups_client
+        if not project_id:
+            project_id = security_groups_client.project_id
+        if secgroup is None:
+            # Get default secgroup for project_id
+            default_secgroups = security_groups_client.list_security_groups(
+                name='default', project_id=project_id)['security_groups']
+            msg = "No default security group for project %s." % (project_id)
+            self.assertNotEmpty(default_secgroups, msg)
+            secgroup = default_secgroups[0]
+
+        ruleset = dict(security_group_id=secgroup['id'],
+                       project_id=secgroup['project_id'])
+        ruleset.update(kwargs)
+
+        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
+        sg_rule = sg_rule['security_group_rule']
+
+        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
+        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
+
+        return sg_rule
+
+    def create_loginable_secgroup_rule(self, security_group_rules_client=None,
+                                       secgroup=None,
+                                       security_groups_client=None):
+        """Create loginable security group rule by neutron clients by default.
+
+        This function will create:
+        1. egress and ingress tcp port 22 allow rule in order to allow ssh
+        access for ipv4.
+        2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
+        3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
+        """
+
+        if security_group_rules_client is None:
+            security_group_rules_client = self.security_group_rules_client
+        if security_groups_client is None:
+            security_groups_client = self.security_groups_client
+        rules = []
+        rulesets = [
+            dict(
+                # ssh
+                protocol='tcp',
+                port_range_min=22,
+                port_range_max=22,
+            ),
+            dict(
+                # ping
+                protocol='icmp',
+            ),
+            dict(
+                # ipv6-icmp for ping6
+                protocol='icmp',
+                ethertype='IPv6',
+            )
+        ]
+        sec_group_rules_client = security_group_rules_client
+        for ruleset in rulesets:
+            for r_direction in ['ingress', 'egress']:
+                ruleset['direction'] = r_direction
+                try:
+                    sg_rule = self.create_security_group_rule(
+                        sec_group_rules_client=sec_group_rules_client,
+                        secgroup=secgroup,
+                        security_groups_client=security_groups_client,
+                        **ruleset)
+                except lib_exc.Conflict as ex:
+                    # if the rule already exists, skip it and continue
+                    msg = 'Security group rule already exists'
+                    if msg not in ex._error_string:
+                        raise ex
+                else:
+                    self.assertEqual(r_direction, sg_rule['direction'])
+                    rules.append(sg_rule)
+
+        return rules
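
The loginable rule set above is built as protocol rulesets crossed with both traffic directions, so three rulesets produce up to six Neutron rules. A quick standalone illustration of that expansion (plain Python, no clients involved):

    rulesets = [
        {'protocol': 'tcp', 'port_range_min': 22, 'port_range_max': 22},  # ssh
        {'protocol': 'icmp'},                                             # ping
        {'protocol': 'icmp', 'ethertype': 'IPv6'},                        # ping6
    ]
    expanded = [dict(ruleset, direction=direction)
                for ruleset in rulesets
                for direction in ('ingress', 'egress')]
    print(len(expanded))  # 6 candidate rules in total
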
+
     def get_remote_client(self, ip_address, username=None, private_key=None,
                           server=None):
         """Get a SSH client to a remote server
@@ -573,21 +726,16 @@
         linux_client.validate_authentication()
         return linux_client
 
-    def image_create(self, name='scenario-img'):
+    def image_create(self, name='scenario-img', **kwargs):
         img_path = CONF.scenario.img_file
         if not os.path.exists(img_path):
-            # TODO(kopecmartin): replace LOG.warning for rasing
-            # InvalidConfiguration exception after tempest 25.0.0 is
-            # released - there will be one release which accepts both
-            # behaviors in order to avoid many failures across CIs and etc.
-            LOG.warning(
+            raise lib_exc.InvalidConfiguration(
                 'Starting Tempest 25.0.0 release, CONF.scenario.img_file need '
                 'a full path for the image. CONF.scenario.img_dir was '
                 'deprecated and will be removed in the next release. Till '
-                'Tempest 25.0.0, old behavior is maintained and keep working '
+                'Tempest 25.0.0, old behavior was maintained and kept working '
                 'but starting Tempest 26.0.0, you need to specify the full '
                 'path in CONF.scenario.img_file config option.')
-            img_path = os.path.join(CONF.scenario.img_dir, img_path)
         img_container_format = CONF.scenario.img_container_format
         img_disk_format = CONF.scenario.img_disk_format
         img_properties = CONF.scenario.img_properties
@@ -613,6 +761,7 @@
             # Additional properties are flattened out in the v2 API.
             if img_properties:
                 params.update(img_properties)
+        params.update(kwargs)
         body = self.image_client.create_image(**params)
         image = body['image'] if 'image' in body else body
         self.addCleanup(self.image_client.delete_image, image['id'])
@@ -625,7 +774,7 @@
         LOG.debug("image:%s", image['id'])
         return image['id']
 
-    def _log_console_output(self, servers=None, client=None):
+    def log_console_output(self, servers=None, client=None, **kwargs):
         """Console log output"""
         if not CONF.compute_feature_enabled.console_output:
             LOG.debug('Console output not supported, cannot log')
@@ -637,7 +786,7 @@
         for server in servers:
             try:
                 console_output = client.get_console_output(
-                    server['id'])['output']
+                    server['id'], **kwargs)['output']
                 LOG.debug('Console output for %s\nbody=\n%s',
                           server['id'], console_output)
             except lib_exc.NotFound:
@@ -649,7 +798,7 @@
         if not isinstance(exc, lib_exc.SSHTimeout):
             LOG.debug('Network information on a devstack host')
 
-    def create_server_snapshot(self, server, name=None):
+    def create_server_snapshot(self, server, name=None, **kwargs):
         """Creates server snapshot"""
         # Glance client
         _image_client = self.image_client
@@ -658,7 +807,7 @@
         if name is None:
             name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
         LOG.debug("Creating a snapshot image for server: %s", server['name'])
-        image = _images_client.create_image(server['id'], name=name)
+        image = _images_client.create_image(server['id'], name=name, **kwargs)
         image_id = image.response['location'].split('images/')[1]
         waiters.wait_for_image_status(_image_client, image_id, 'active')
 
@@ -697,31 +846,32 @@
                   image_name, server['name'])
         return snapshot_image
 
-    def nova_volume_attach(self, server, volume_to_attach):
+    def nova_volume_attach(self, server, volume_to_attach, **kwargs):
         """Compute volume attach
 
         This utility attaches volume from compute and waits for the
         volume status to be 'in-use' state.
         """
         volume = self.servers_client.attach_volume(
-            server['id'], volumeId=volume_to_attach['id'])['volumeAttachment']
+            server['id'], volumeId=volume_to_attach['id'],
+            **kwargs)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'in-use')
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.nova_volume_detach, server, volume)
         # Return the updated volume after the attachment
         return self.volumes_client.show_volume(volume['id'])['volume']
 
     def nova_volume_detach(self, server, volume):
         """Compute volume detach
 
-        This utility detaches volume from compute and check whether the
-        volume status is 'available' state, and if not, an exception
-        will be thrown.
+        This utility detaches the volume from the server and checks whether the
+        volume attachment has been removed from Nova.
         """
         self.servers_client.detach_volume(server['id'], volume['id'])
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume['id'], 'available')
-        volume = self.volumes_client.show_volume(volume['id'])['volume']
+        waiters.wait_for_volume_attachment_remove_from_server(
+            self.servers_client, server['id'], volume['id'])
 
     def ping_ip_address(self, ip_address, should_succeed=True,
                         ping_timeout=None, mtu=None, server=None):
@@ -760,7 +910,7 @@
                       'result': 'expected' if result else 'unexpected'
                   })
         if server:
-            self._log_console_output([server])
+            self.log_console_output([server])
         return result
 
     def check_vm_connectivity(self, ip_address,
@@ -810,33 +960,115 @@
                 LOG.exception(extra_msg)
                 raise
 
-    def create_floating_ip(self, server, pool_name=None):
-        """Create a floating IP and associates to a server on Nova"""
+    def get_server_port_id_and_ip4(self, server, ip_addr=None, **kwargs):
 
-        if not pool_name:
-            pool_name = CONF.network.floating_network_name
-        floating_ip = (self.compute_floating_ips_client.
-                       create_floating_ip(pool=pool_name)['floating_ip'])
+        if ip_addr and not kwargs.get('fixed_ips'):
+            kwargs['fixed_ips'] = 'ip_address=%s' % ip_addr
+        ports = self.os_admin.ports_client.list_ports(
+            device_id=server['id'], **kwargs)['ports']
+
+        # A port can have more than one IP address in some cases.
+        # If the network is dual-stack (IPv4 + IPv6), this port is associated
+        # with 2 subnets
+
+        def _is_active(port):
+            # NOTE(vsaienko) With Ironic, instances live on separate hardware
+            # servers. Neutron does not bind ports for Ironic instances, as a
+            # result the port remains in the DOWN state. This has been fixed
+            # with the introduction of the networking-baremetal plugin but
+            # it's not mandatory (and is not used on all stable branches).
+            return (port['status'] == 'ACTIVE' or
+                    port.get('binding:vnic_type') == 'baremetal')
+
+        port_map = [(p["id"], fxip["ip_address"])
+                    for p in ports
+                    for fxip in p["fixed_ips"]
+                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
+                        _is_active(p))]
+        inactive = [p for p in ports if p['status'] != 'ACTIVE']
+        if inactive:
+            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
+
+        self.assertNotEmpty(port_map,
+                            "No IPv4 addresses found in: %s" % ports)
+        self.assertEqual(len(port_map), 1,
+                         "Found multiple IPv4 addresses: %s. "
+                         "Unable to determine which port to target."
+                         % port_map)
+        return port_map[0]
+
+    def create_floating_ip(self, server, external_network_id=None,
+                           port_id=None, client=None, **kwargs):
+        """Create a floating IP and associates to a resource/port on Neutron"""
+
+        if not external_network_id:
+            external_network_id = CONF.network.public_network_id
+        if not client:
+            client = self.floating_ips_client
+        if not port_id:
+            port_id, ip4 = self.get_server_port_id_and_ip4(server)
+        else:
+            ip4 = None
+
+        floatingip_kwargs = {
+            'floating_network_id': external_network_id,
+            'port_id': port_id,
+            'tenant_id': server.get('project_id') or server['tenant_id'],
+            'fixed_ip_address': ip4,
+        }
+        if CONF.network.subnet_id:
+            floatingip_kwargs['subnet_id'] = CONF.network.subnet_id
+
+        floatingip_kwargs.update(kwargs)
+        result = client.create_floatingip(**floatingip_kwargs)
+        floating_ip = result['floatingip']
+
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.compute_floating_ips_client.delete_floating_ip,
+                        client.delete_floatingip,
                         floating_ip['id'])
-        self.compute_floating_ips_client.associate_floating_ip_to_server(
-            floating_ip['ip'], server['id'])
+        return floating_ip
+
+    def associate_floating_ip(self, floating_ip, server):
+        """Associate floating ip to server
+
+        This wrapper utility attaches the floating_ip to
+        the respective port_id of the server
+        """
+        port_id, _ = self.get_server_port_id_and_ip4(server)
+        kwargs = dict(port_id=port_id)
+        floating_ip = self.floating_ips_client.update_floatingip(
+            floating_ip['id'], **kwargs)['floatingip']
+        self.assertEqual(port_id, floating_ip['port_id'])
+        return floating_ip
+
+    def disassociate_floating_ip(self, floating_ip):
+        """Disassociates floating ip
+
+        This wrapper utility disassociates the given floating ip.
+        :param floating_ip: a dict which is a return value of the
+        floating_ips_client.create_floatingip method
+        """
+        kwargs = dict(port_id=None)
+        floating_ip = self.floating_ips_client.update_floatingip(
+            floating_ip['id'], **kwargs)['floatingip']
+        self.assertIsNone(floating_ip['port_id'])
         return floating_ip
 
     def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
-                         private_key=None, server=None):
+                         private_key=None, server=None, username=None,
+                         fs='ext4'):
         """Creates timestamp
 
         This wrapper utility does ssh, creates timestamp and returns the
         created timestamp.
         """
-
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key,
-                                            server=server)
+                                            server=server,
+                                            username=username)
+
         if dev_name is not None:
-            ssh_client.make_fs(dev_name)
+            ssh_client.make_fs(dev_name, fs=fs)
             ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
                                                                mount_path))
         cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
@@ -848,15 +1080,25 @@
         return timestamp
 
     def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
-                      private_key=None, server=None):
+                      private_key=None, server=None, username=None):
         """Returns timestamp
 
         This wrapper utility does ssh and returns the timestamp.
+
+        :param ip_address: The floating IP or fixed IP of the remote server
+        :param dev_name: Name of the device that stores the timestamp
+        :param mount_path: Path which should be used as mount point for
+                           dev_name
+        :param private_key: The SSH private key to use for authentication
+        :param server: Server dict, used for debugging purposes
+        :param username: Name of the Linux account on the remote server
         """
 
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key,
-                                            server=server)
+                                            server=server,
+                                            username=username)
+
         if dev_name is not None:
             ssh_client.mount(dev_name, mount_path)
         timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
@@ -865,18 +1107,23 @@
             ssh_client.exec_command('sudo umount %s' % mount_path)
         return timestamp
 
-    def get_server_ip(self, server):
+    def get_server_ip(self, server, **kwargs):
         """Get the server fixed or floating IP.
 
         Based on the configuration we're in, return a correct ip
         address for validating that a guest is up.
+
+        If CONF.validation.connect_method is floating, then
+        a floating IP will be created, passing kwargs as additional
+        arguments.
         """
 
         if CONF.validation.connect_method == 'floating':
             # The tests calling this method don't have a floating IP
             # and can't make use of the validation resources. So the
             # method is creating the floating IP there.
-            return self.create_floating_ip(server)['ip']
+            return self.create_floating_ip(
+                server, **kwargs)['floating_ip_address']
         elif CONF.validation.connect_method == 'fixed':
             # Determine the network name to look for based on config or creds
             # provider network resources.
@@ -916,14 +1163,14 @@
                                     keypair=None,
                                     security_group=None,
                                     delete_on_termination=False,
-                                    name=None):
+                                    name=None, **kwargs):
         """Boot instance from resource
 
         This wrapper utility boots instance from resource with block device
         mapping with source info passed in arguments
         """
 
-        create_kwargs = dict()
+        create_kwargs = {'image_id': ''}
         if keypair:
             create_kwargs['key_name'] = keypair['name']
         if security_group:
@@ -935,15 +1182,25 @@
             delete_on_termination=delete_on_termination))
         if name:
             create_kwargs['name'] = name
+        create_kwargs.update(kwargs)
 
-        return self.create_server(image_id='', **create_kwargs)
+        return self.create_server(**create_kwargs)
 
-    def create_volume_from_image(self):
-        """Create volume from image"""
-        img_uuid = CONF.compute.image_ref
-        vol_name = data_utils.rand_name(
-            self.__class__.__name__ + '-volume-origin')
-        return self.create_volume(name=vol_name, imageRef=img_uuid)
+    def create_volume_from_image(self, **kwargs):
+        """Create volume from image.
+
+        :param image_id: ID of the image to create volume from,
+            CONF.compute.image_ref by default
+        :param name: name of the volume,
+            '$classname-volume-origin' by default
+        :param **kwargs: additional parameters
+        """
+        image_id = kwargs.pop('image_id', CONF.compute.image_ref)
+        name = kwargs.pop('name', None)
+        if not name:
+            namestart = self.__class__.__name__ + '-volume-origin'
+            name = data_utils.rand_name(namestart)
+        return self.create_volume(name=name, imageRef=image_id, **kwargs)
 
 
 class NetworkScenarioTest(ScenarioTest):
@@ -958,18 +1215,16 @@
 
     """
 
-    credentials = ['primary', 'admin']
-
     @classmethod
     def skip_checks(cls):
         super(NetworkScenarioTest, cls).skip_checks()
         if not CONF.service_available.neutron:
             raise cls.skipException('Neutron not available')
 
-    def _create_network(self, networks_client=None,
-                        project_id=None,
-                        namestart='network-smoke-',
-                        port_security_enabled=True, **net_dict):
+    def create_network(self, networks_client=None,
+                       project_id=None,
+                       namestart='network-smoke-',
+                       port_security_enabled=True, **net_dict):
         if not networks_client:
             networks_client = self.networks_client
         if not project_id:
@@ -1004,6 +1259,8 @@
         :Keyword Arguments:
 
             * *ip_version = ip version of the given network,
+            use_default_subnetpool = whether to use the default subnetpool
+                    to manage the IPv6 address range.
         """
 
         if not subnets_client:
@@ -1012,131 +1269,93 @@
         def cidr_in_use(cidr, project_id):
             """Check cidr existence
 
-            :returns: True if subnet with cidr already exist in tenant
-                  False else
+            :returns: True if a subnet with the cidr already exists in the
+                  tenant or in an external network, False otherwise
             """
-            cidr_in_use = self.os_admin.subnets_client.list_subnets(
+            tenant_subnets = self.os_admin.subnets_client.list_subnets(
                 project_id=project_id, cidr=cidr)['subnets']
-            return len(cidr_in_use) != 0
+            external_nets = self.os_admin.networks_client.list_networks(
+                **{"router:external": True})['networks']
+            external_subnets = []
+            for ext_net in external_nets:
+                external_subnets.extend(
+                    self.os_admin.subnets_client.list_subnets(
+                        network_id=ext_net['id'], cidr=cidr)['subnets'])
+            return len(tenant_subnets + external_subnets) != 0
 
-        ip_version = kwargs.pop('ip_version', 4)
-
-        if ip_version == 6:
-            tenant_cidr = netaddr.IPNetwork(
-                CONF.network.project_network_v6_cidr)
-            num_bits = CONF.network.project_network_v6_mask_bits
-        else:
-            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            num_bits = CONF.network.project_network_mask_bits
-
-        result = None
-        str_cidr = None
-        # Repeatedly attempt subnet creation with sequential cidr
-        # blocks until an unallocated block is found.
-        for subnet_cidr in tenant_cidr.subnet(num_bits):
-            str_cidr = str(subnet_cidr)
-            if cidr_in_use(str_cidr, project_id=network['project_id']):
-                continue
+        def _make_create_subnet_request(namestart, network,
+                                        ip_version, subnets_client, **kwargs):
 
             subnet = dict(
                 name=data_utils.rand_name(namestart),
                 network_id=network['id'],
                 project_id=network['project_id'],
-                cidr=str_cidr,
                 ip_version=ip_version,
                 **kwargs
             )
+
+            if ip_version == 6:
+                subnet['ipv6_address_mode'] = 'slaac'
+                subnet['ipv6_ra_mode'] = 'slaac'
+
             try:
-                result = subnets_client.create_subnet(**subnet)
-                break
+                return subnets_client.create_subnet(**subnet)
             except lib_exc.Conflict as e:
-                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
-                if not is_overlapping_cidr:
+                if 'overlaps with another subnet' not in str(e):
                     raise
+
+        result = None
+        str_cidr = None
+
+        use_default_subnetpool = kwargs.get('use_default_subnetpool', False)
+        ip_version = kwargs.pop('ip_version', 4)
+
+        if not use_default_subnetpool:
+
+            if ip_version == 6:
+                tenant_cidr = netaddr.IPNetwork(
+                    CONF.network.project_network_v6_cidr)
+                num_bits = CONF.network.project_network_v6_mask_bits
+            else:
+                tenant_cidr = netaddr.IPNetwork(
+                    CONF.network.project_network_cidr)
+                num_bits = CONF.network.project_network_mask_bits
+
+            # Repeatedly attempt subnet creation with sequential cidr
+            # blocks until an unallocated block is found.
+            for subnet_cidr in tenant_cidr.subnet(num_bits):
+                str_cidr = str(subnet_cidr)
+                if cidr_in_use(str_cidr, project_id=network['project_id']):
+                    continue
+                result = _make_create_subnet_request(
+                    namestart, network, ip_version, subnets_client,
+                    cidr=str_cidr, **kwargs)
+
+                if result is not None:
+                    break
+
+        else:
+            result = _make_create_subnet_request(
+                namestart, network, ip_version, subnets_client,
+                **kwargs)
         self.assertIsNotNone(result, 'Unable to allocate tenant network')
 
         subnet = result['subnet']
-        self.assertEqual(subnet['cidr'], str_cidr)
+        if str_cidr is not None:
+            self.assertEqual(subnet['cidr'], str_cidr)
 
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         subnets_client.delete_subnet, subnet['id'])
 
         return subnet
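
When no default subnetpool is used, the search above walks the project CIDR in fixed-size blocks until it finds one that no existing subnet occupies. A standalone sketch of that loop with netaddr (made-up CIDRs, not Tempest code):

    import netaddr

    tenant_cidr = netaddr.IPNetwork('10.1.0.0/20')  # e.g. project_network_cidr
    mask_bits = 28                                  # e.g. project_network_mask_bits
    in_use = {'10.1.0.0/28', '10.1.0.16/28'}        # pretend these exist already

    # first candidate block that is not already allocated
    str_cidr = next(str(cidr) for cidr in tenant_cidr.subnet(mask_bits)
                    if str(cidr) not in in_use)
    print(str_cidr)  # 10.1.0.32/28
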
 
-    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
-        if ip_addr:
-            ports = self.os_admin.ports_client.list_ports(
-                device_id=server['id'],
-                fixed_ips='ip_address=%s' % ip_addr)['ports']
-        else:
-            ports = self.os_admin.ports_client.list_ports(
-                device_id=server['id'])['ports']
-        # A port can have more than one IP address in some cases.
-        # If the network is dual-stack (IPv4 + IPv6), this port is associated
-        # with 2 subnets
-
-        def _is_active(port):
-            # NOTE(vsaienko) With Ironic, instances live on separate hardware
-            # servers. Neutron does not bind ports for Ironic instances, as a
-            # result the port remains in the DOWN state. This has been fixed
-            # with the introduction of the networking-baremetal plugin but
-            # it's not mandatory (and is not used on all stable branches).
-            return (port['status'] == 'ACTIVE' or
-                    port.get('binding:vnic_type') == 'baremetal')
-
-        port_map = [(p["id"], fxip["ip_address"])
-                    for p in ports
-                    for fxip in p["fixed_ips"]
-                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
-                        _is_active(p))]
-        inactive = [p for p in ports if p['status'] != 'ACTIVE']
-        if inactive:
-            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
-        self.assertNotEmpty(port_map,
-                            "No IPv4 addresses found in: %s" % ports)
-        self.assertEqual(len(port_map), 1,
-                         "Found multiple IPv4 addresses: %s. "
-                         "Unable to determine which port to target."
-                         % port_map)
-        return port_map[0]
-
-    def _get_network_by_name(self, network_name):
+    def get_network_by_name(self, network_name):
         net = self.os_admin.networks_client.list_networks(
             name=network_name)['networks']
         self.assertNotEmpty(net,
                             "Unable to get network by name: %s" % network_name)
         return net[0]
 
-    def create_floating_ip(self, server, external_network_id=None,
-                           port_id=None, client=None):
-        """Create a floating IP and associates to a resource/port on Neutron"""
-
-        if not external_network_id:
-            external_network_id = CONF.network.public_network_id
-        if not client:
-            client = self.floating_ips_client
-        if not port_id:
-            port_id, ip4 = self._get_server_port_id_and_ip4(server)
-        else:
-            ip4 = None
-
-        kwargs = {
-            'floating_network_id': external_network_id,
-            'port_id': port_id,
-            'tenant_id': server.get('project_id') or server['tenant_id'],
-            'fixed_ip_address': ip4,
-        }
-        if CONF.network.subnet_id:
-            kwargs['subnet_id'] = CONF.network.subnet_id
-        result = client.create_floatingip(**kwargs)
-        floating_ip = result['floatingip']
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_floatingip,
-                        floating_ip['id'])
-        return floating_ip
-
     def check_floating_ip_status(self, floating_ip, status):
         """Verifies floatingip reaches the given status
 
@@ -1187,7 +1406,7 @@
                                                should_connect=should_connect)
         except Exception as e:
             LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers_for_debug)
+            self.log_console_output(servers_for_debug)
             self._log_net_info(e)
             raise
 
@@ -1230,169 +1449,10 @@
                 % (dest, source_host)
         else:
             msg = "%s is reachable from %s" % (dest, source_host)
-        self._log_console_output()
+        self.log_console_output()
         self.fail(msg)
 
-    def _create_security_group(self, security_group_rules_client=None,
-                               project_id=None,
-                               namestart='secgroup-smoke',
-                               security_groups_client=None):
-        if security_group_rules_client is None:
-            security_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        if project_id is None:
-            project_id = security_groups_client.project_id
-        secgroup = self._create_empty_security_group(
-            namestart=namestart, client=security_groups_client,
-            project_id=project_id)
-
-        # Add rules to the security group
-        rules = self._create_loginable_secgroup_rule(
-            security_group_rules_client=security_group_rules_client,
-            secgroup=secgroup,
-            security_groups_client=security_groups_client)
-        for rule in rules:
-            self.assertEqual(project_id, rule['project_id'])
-            self.assertEqual(secgroup['id'], rule['security_group_id'])
-        return secgroup
-
-    def _create_empty_security_group(self, client=None, project_id=None,
-                                     namestart='secgroup-smoke'):
-        """Create a security group without rules.
-
-        Default rules will be created:
-         - IPv4 egress to any
-         - IPv6 egress to any
-
-        :param project_id: secgroup will be created in this project
-        :returns: the created security group
-        """
-
-        if client is None:
-            client = self.security_groups_client
-        if not project_id:
-            project_id = client.project_id
-        sg_name = data_utils.rand_name(namestart)
-        sg_desc = sg_name + " description"
-        sg_dict = dict(name=sg_name,
-                       description=sg_desc)
-        sg_dict['project_id'] = project_id
-        result = client.create_security_group(**sg_dict)
-
-        secgroup = result['security_group']
-        self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(project_id, secgroup['project_id'])
-        self.assertEqual(secgroup['description'], sg_desc)
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_security_group, secgroup['id'])
-        return secgroup
-
-    def _create_security_group_rule(self, secgroup=None,
-                                    sec_group_rules_client=None,
-                                    project_id=None,
-                                    security_groups_client=None, **kwargs):
-        """Create a rule from a dictionary of rule parameters.
-
-        Create a rule in a secgroup. if secgroup not defined will search for
-        default secgroup in project_id.
-
-        :param secgroup: the security group.
-        :param project_id: if secgroup not passed -- the tenant in which to
-            search for default secgroup
-        :param kwargs: a dictionary containing rule parameters:
-            for example, to allow incoming ssh:
-            rule = {
-                    direction: 'ingress'
-                    protocol:'tcp',
-                    port_range_min: 22,
-                    port_range_max: 22
-                    }
-        """
-
-        if sec_group_rules_client is None:
-            sec_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        if not project_id:
-            project_id = security_groups_client.project_id
-        if secgroup is None:
-            # Get default secgroup for project_id
-            default_secgroups = security_groups_client.list_security_groups(
-                name='default', project_id=project_id)['security_groups']
-            msg = "No default security group for project %s." % (project_id)
-            self.assertNotEmpty(default_secgroups, msg)
-            secgroup = default_secgroups[0]
-
-        ruleset = dict(security_group_id=secgroup['id'],
-                       project_id=secgroup['project_id'])
-        ruleset.update(kwargs)
-
-        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
-        sg_rule = sg_rule['security_group_rule']
-
-        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
-        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
-
-        return sg_rule
-
-    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
-                                        secgroup=None,
-                                        security_groups_client=None):
-        """Create loginable security group rule
-
-        This function will create:
-        1. egress and ingress tcp port 22 allow rule in order to allow ssh
-        access for ipv4.
-        2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
-        3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
-        """
-
-        if security_group_rules_client is None:
-            security_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        rules = []
-        rulesets = [
-            dict(
-                # ssh
-                protocol='tcp',
-                port_range_min=22,
-                port_range_max=22,
-            ),
-            dict(
-                # ping
-                protocol='icmp',
-            ),
-            dict(
-                # ipv6-icmp for ping6
-                protocol='icmp',
-                ethertype='IPv6',
-            )
-        ]
-        sec_group_rules_client = security_group_rules_client
-        for ruleset in rulesets:
-            for r_direction in ['ingress', 'egress']:
-                ruleset['direction'] = r_direction
-                try:
-                    sg_rule = self._create_security_group_rule(
-                        sec_group_rules_client=sec_group_rules_client,
-                        secgroup=secgroup,
-                        security_groups_client=security_groups_client,
-                        **ruleset)
-                except lib_exc.Conflict as ex:
-                    # if rule already exist - skip rule and continue
-                    msg = 'Security group rule already exists'
-                    if msg not in ex._error_string:
-                        raise ex
-                else:
-                    self.assertEqual(r_direction, sg_rule['direction'])
-                    rules.append(sg_rule)
-
-        return rules
-
-    def _get_router(self, client=None, project_id=None):
+    def get_router(self, client=None, project_id=None, **kwargs):
         """Retrieve a router for the given tenant id.
 
         If a public router has been configured, it will be returned.
@@ -1412,11 +1472,20 @@
             body = client.show_router(router_id)
             return body['router']
         elif network_id:
+            name = kwargs.pop('name', None)
+            if not name:
+                namestart = self.__class__.__name__ + '-router'
+                name = data_utils.rand_name(namestart)
+
+            ext_gw_info = kwargs.pop('external_gateway_info', None)
+            if not ext_gw_info:
+                ext_gw_info = dict(network_id=network_id)
             router = client.create_router(
-                name=data_utils.rand_name(self.__class__.__name__ + '-router'),
-                admin_state_up=True,
+                name=name,
+                admin_state_up=kwargs.get('admin_state_up', True),
                 project_id=project_id,
-                external_gateway_info=dict(network_id=network_id))['router']
+                external_gateway_info=ext_gw_info,
+                **kwargs)['router']
             self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                             client.delete_router, router['id'])
             return router
@@ -1424,10 +1493,11 @@
             raise Exception("Neither of 'public_router_id' or "
                             "'public_network_id' has been defined.")
 
-    def create_networks(self, networks_client=None,
-                        routers_client=None, subnets_client=None,
-                        project_id=None, dns_nameservers=None,
-                        port_security_enabled=True, **net_dict):
+    def setup_network_subnet_with_router(
+            self, networks_client=None,
+            routers_client=None, subnets_client=None,
+            project_id=None, dns_nameservers=None,
+            port_security_enabled=True, **net_dict):
         """Create a network with a subnet connected to a router.
 
         The baremetal driver is a special case since all nodes are
@@ -1452,18 +1522,18 @@
             if not CONF.compute.fixed_network_name:
                 m = 'fixed_network_name must be specified in config'
                 raise lib_exc.InvalidConfiguration(m)
-            network = self._get_network_by_name(
+            network = self.get_network_by_name(
                 CONF.compute.fixed_network_name)
             router = None
             subnet = None
         else:
-            network = self._create_network(
+            network = self.create_network(
                 networks_client=networks_client,
                 project_id=project_id,
                 port_security_enabled=port_security_enabled,
                 **net_dict)
-            router = self._get_router(client=routers_client,
-                                      project_id=project_id)
+            router = self.get_router(client=routers_client,
+                                     project_id=project_id)
             subnet_kwargs = dict(network=network,
                                  subnets_client=subnets_client)
             # use explicit check because empty list is a valid option
@@ -1487,8 +1557,6 @@
 class EncryptionScenarioTest(ScenarioTest):
     """Base class for encryption scenario tests"""
 
-    credentials = ['primary', 'admin']
-
     @classmethod
     def setup_clients(cls):
         super(EncryptionScenarioTest, cls).setup_clients()
@@ -1530,6 +1598,8 @@
     class.
     """
 
+    credentials = ['primary']
+
     @classmethod
     def skip_checks(cls):
         super(ObjectStorageScenarioTest, cls).skip_checks()
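Editor's note (not part of the patch): the refactored create_subnet above scans the tenant CIDR for the first block that is not already in use. A minimal standalone sketch of that scan, assuming only the netaddr library; pick_free_cidr and the sample inputs are illustrative:

    import netaddr

    def pick_free_cidr(parent_cidr, prefix_bits, in_use):
        # Walk the parent CIDR in sequential /prefix_bits blocks and return
        # the first one that is not already allocated.
        for subnet_cidr in netaddr.IPNetwork(parent_cidr).subnet(prefix_bits):
            if str(subnet_cidr) not in in_use:
                return str(subnet_cidr)
        return None

    # The first two /28 blocks are taken, so the third one is chosen.
    print(pick_free_cidr('10.1.0.0/24', 28, {'10.1.0.0/28', '10.1.0.16/28'}))
    # -> 10.1.0.32/28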
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index b515639..58e234f 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -51,10 +51,27 @@
         return aggregate
 
     def _get_host_name(self):
+        # Find a host that has not been added to another availability zone,
+        # because a host cannot belong to more than one availability zone.
         svc_list = self.services_client.list_services(
             binary='nova-compute')['services']
         self.assertNotEmpty(svc_list)
-        return svc_list[0]['host']
+        hosts_available = []
+        for host in svc_list:
+            if (host['state'] == 'up' and host['status'] == 'enabled'):
+                hosts_available.append(host['host'])
+        aggregates = self.aggregates_client.list_aggregates()['aggregates']
+        hosts_in_zone = []
+        for agg in aggregates:
+            if agg['availability_zone']:
+                hosts_in_zone.extend(agg['hosts'])
+        hosts = [v for v in hosts_available if v not in hosts_in_zone]
+        if not hosts:
+            raise self.skipException("All hosts are already in other "
+                                     "availability zones, so can't add "
+                                     "host to aggregate. \nAggregates list: "
+                                     "%s" % aggregates)
+        return hosts[0]
 
     def _add_host(self, aggregate_id, host):
         aggregate = (self.aggregates_client.add_host(aggregate_id, host=host)
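Editor's note (not part of the patch): _get_host_name above picks a compute host that is up, enabled, and not yet in an aggregate that has an availability zone. A minimal sketch of that filtering; pick_free_host and the sample data are illustrative:

    def pick_free_host(services, aggregates):
        # Hosts that are up and enabled ...
        hosts_available = [s['host'] for s in services
                           if s['state'] == 'up' and s['status'] == 'enabled']
        # ... minus hosts already in an aggregate with an availability zone.
        hosts_in_zone = []
        for agg in aggregates:
            if agg['availability_zone']:
                hosts_in_zone.extend(agg['hosts'])
        free = [h for h in hosts_available if h not in hosts_in_zone]
        return free[0] if free else None

    services = [{'host': 'cmp1', 'state': 'up', 'status': 'enabled'},
                {'host': 'cmp2', 'state': 'up', 'status': 'enabled'}]
    aggregates = [{'availability_zone': 'az1', 'hosts': ['cmp1']}]
    print(pick_free_host(services, aggregates))  # -> cmp2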
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
new file mode 100644
index 0000000..b1098fa
--- /dev/null
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -0,0 +1,141 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import html.parser
+import ssl
+from urllib import parse
+from urllib import request
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib import decorators
+from tempest import test
+
+CONF = config.CONF
+
+
+class HorizonHTMLParser(html.parser.HTMLParser):
+    csrf_token = None
+    region = None
+    login = None
+
+    def _find_name(self, attrs, name):
+        for attrpair in attrs:
+            if attrpair[0] == 'name' and attrpair[1] == name:
+                return True
+        return False
+
+    def _find_value(self, attrs):
+        for attrpair in attrs:
+            if attrpair[0] == 'value':
+                return attrpair[1]
+        return None
+
+    def _find_attr_value(self, attrs, attr_name):
+        for attrpair in attrs:
+            if attrpair[0] == attr_name:
+                return attrpair[1]
+        return None
+
+    def handle_starttag(self, tag, attrs):
+        if tag == 'input':
+            if self._find_name(attrs, 'csrfmiddlewaretoken'):
+                self.csrf_token = self._find_value(attrs)
+            if self._find_name(attrs, 'region'):
+                self.region = self._find_value(attrs)
+        if tag == 'form':
+            self.login = self._find_attr_value(attrs, 'action')
+
+
+class TestDashboardBasicOps(test.BaseTestCase):
+
+    """The test suite for dashboard basic operations
+
+    This is a basic scenario test:
+    * checks that the login page is available
+    * logs in as a regular user
+    * checks that the user home page loads without error
+    """
+    opener = None
+
+    credentials = ['primary']
+
+    @classmethod
+    def skip_checks(cls):
+        super(TestDashboardBasicOps, cls).skip_checks()
+        if not CONF.service_available.horizon:
+            raise cls.skipException("Horizon support is required")
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.set_network_resources()
+        super(TestDashboardBasicOps, cls).setup_credentials()
+
+    def check_login_page(self):
+        response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+        self.assertIn("id_username", response.decode("utf-8"))
+
+    def user_login(self, username, password):
+        response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+
+        # Grab the CSRF token and default region
+        parser = HorizonHTMLParser()
+        parser.feed(response.decode("utf-8"))
+
+        # Construct the login URL for the dashboard; discovering it from the
+        # parsed form action accommodates a non-/ web root.
+        login_url = parse.urljoin(CONF.dashboard.dashboard_url, parser.login)
+
+        # Prepare login form request
+        req = request.Request(login_url)
+        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+        req.add_header('Referer', CONF.dashboard.dashboard_url)
+
+        # Pass the default domain name regardless of the auth version in order
+        # to test the scenario of when horizon is running with keystone v3
+        params = {'username': username,
+                  'password': password,
+                  'region': parser.region,
+                  'domain': CONF.auth.default_credentials_domain_name,
+                  'csrfmiddlewaretoken': parser.csrf_token}
+        self._get_opener().open(req, parse.urlencode(params).encode())
+
+    def check_home_page(self):
+        response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+        self.assertIn('Overview', response.decode("utf-8"))
+
+    def _get_opener(self):
+        if not self.opener:
+            if (CONF.dashboard.disable_ssl_certificate_validation and
+                    self._ssl_default_context_supported()):
+                ctx = ssl.create_default_context()
+                ctx.check_hostname = False
+                ctx.verify_mode = ssl.CERT_NONE
+                self.opener = request.build_opener(
+                    request.HTTPSHandler(context=ctx),
+                    request.HTTPCookieProcessor())
+            else:
+                self.opener = request.build_opener(
+                    request.HTTPCookieProcessor())
+        return self.opener
+
+    def _ssl_default_context_supported(self):
+        return (hasattr(ssl, 'create_default_context'))
+
+    @decorators.attr(type='smoke')
+    @decorators.idempotent_id('4f8851b1-0e69-482b-b63b-84c6e76f6c80')
+    @utils.services('dashboard')
+    def test_basic_scenario(self):
+        creds = self.os_primary.credentials
+        self.check_login_page()
+        self.user_login(creds.username, creds.password)
+        self.check_home_page()
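Editor's note (not part of the patch): the login flow above scrapes the CSRF token and form action from the Horizon login page with the stdlib html.parser. A minimal standalone sketch of that technique; LoginFormParser and the HTML snippet are illustrative:

    import html.parser

    class LoginFormParser(html.parser.HTMLParser):
        # Collects the CSRF token and the form action while parsing.
        csrf_token = None
        login = None

        def handle_starttag(self, tag, attrs):
            attrs = dict(attrs)
            if tag == 'input' and attrs.get('name') == 'csrfmiddlewaretoken':
                self.csrf_token = attrs.get('value')
            if tag == 'form':
                self.login = attrs.get('action')

    parser = LoginFormParser()
    parser.feed('<form action="/auth/login/">'
                '<input name="csrfmiddlewaretoken" value="abc123"></form>')
    print(parser.login, parser.csrf_token)  # -> /auth/login/ abc123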
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index fc93a5e..6ee9f28 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -30,8 +30,7 @@
     For both LUKS and cryptsetup encryption types, this test performs
     the following:
 
-    * Creates an image in Glance
-    * Boots an instance from the image
+    * Boots an instance from an image (CONF.compute.image_ref)
     * Creates an encryption type (as admin)
     * Creates a volume of that encryption type (as a regular user)
     * Attaches and detaches the encrypted volume to the instance
@@ -44,10 +43,9 @@
             raise cls.skipException('Encrypted volume attach is not supported')
 
     def launch_instance(self):
-        image = self.image_create()
         keypair = self.create_keypair()
 
-        return self.create_server(image_id=image, key_name=keypair['name'])
+        return self.create_server(key_name=keypair['name'])
 
     def attach_detach_volume(self, server, volume):
         attached_volume = self.nova_volume_attach(server, volume)
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
index e7085f6..55b8d15 100644
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
+import testtools
 
 from tempest.common import utils
 from tempest.common import waiters
@@ -20,10 +20,10 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
 from tempest.scenario import manager
 
 
-LOG = logging.getLogger(__name__)
 CONF = config.CONF
 
 
@@ -55,6 +55,8 @@
     # https://github.com/openstack/placement/blob/master/placement/
     # db/constants.py#L16
     PLACEMENT_MAX_INT = 0x7FFFFFFF
+    BANDWIDTH_1 = 1000
+    BANDWIDTH_2 = 2000
 
     @classmethod
     def setup_clients(cls):
@@ -62,9 +64,12 @@
         cls.placement_client = cls.os_admin.placement_client
         cls.networks_client = cls.os_admin.networks_client
         cls.subnets_client = cls.os_admin.subnets_client
+        cls.ports_client = cls.os_primary.ports_client
         cls.routers_client = cls.os_adm.routers_client
         cls.qos_client = cls.os_admin.qos_client
         cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+        cls.flavors_client = cls.os_adm.flavors_client
+        cls.servers_client = cls.os_adm.servers_client
 
     @classmethod
     def skip_checks(cls):
@@ -74,6 +79,10 @@
                   "placement based QoS allocation."
             raise cls.skipException(msg)
 
+    def setUp(self):
+        super(MinBwAllocationPlacementTest, self).setUp()
+        self._check_if_allocation_is_possible()
+
     def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
         policy = self.qos_client.create_qos_policy(
             name=data_utils.rand_name(name_prefix),
@@ -93,7 +102,7 @@
 
         return policy
 
-    def _create_qos_policies(self):
+    def _create_qos_basic_policies(self):
         self.qos_policy_valid = self._create_policy_and_min_bw_rule(
             name_prefix='test_policy_valid',
             min_kbps=self.SMALLEST_POSSIBLE_BW)
@@ -101,12 +110,25 @@
             name_prefix='test_policy_not_valid',
             min_kbps=self.PLACEMENT_MAX_INT)
 
-    def _create_network_and_qos_policies(self):
+    def _create_qos_policies_from_life(self):
+        # For tempest-slow the max bandwidth configured is 1000000,
+        # https://opendev.org/openstack/tempest/src/branch/master/
+        # .zuul.yaml#L416-L420
+        self.qos_policy_1 = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_1',
+            min_kbps=self.BANDWIDTH_1
+        )
+        self.qos_policy_2 = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_2',
+            min_kbps=self.BANDWIDTH_2
+        )
+
+    def _create_network_and_qos_policies(self, policy_method):
         physnet_name = CONF.network_feature_enabled.qos_placement_physnet
         base_segm = \
             CONF.network_feature_enabled.provider_net_base_segmentation_id
 
-        self.prov_network, _, _ = self.create_networks(
+        self.prov_network, _, _ = self.setup_network_subnet_with_router(
             networks_client=self.networks_client,
             routers_client=self.routers_client,
             subnets_client=self.subnets_client,
@@ -117,15 +139,18 @@
                 'provider:segmentation_id': base_segm
             })
 
-        self._create_qos_policies()
+        policy_method()
 
     def _check_if_allocation_is_possible(self):
         alloc_candidates = self.placement_client.list_allocation_candidates(
             resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
                                   self.SMALLEST_POSSIBLE_BW))
         if len(alloc_candidates['provider_summaries']) == 0:
-            self.fail('No allocation candidates are available for %s:%s' %
-                      (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
+            # Skip if the backend does not support QoS minimum bandwidth
+            # allocation in Placement API
+            raise self.skipException(
+                'No allocation candidates are available for %s:%s' %
+                (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
 
         # Just to be sure check with impossible high (placement max_int),
         # allocation
@@ -136,8 +161,43 @@
             self.fail('For %s:%s there should be no available candidate!' %
                       (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
 
+    def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
+        wait_until = (None if status == 'ERROR' else status)
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=qos_policy_id)
+
+        server = self.create_server(networks=[{'port': port['id']}],
+                                    wait_until=wait_until)
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status=status, ready_wait=False, raise_on_error=False)
+        return server, port
+
+    def _assert_allocation_is_as_expected(self, consumer, port_ids,
+                                          min_kbps=SMALLEST_POSSIBLE_BW):
+        allocations = self.placement_client.list_allocations(
+            consumer)['allocations']
+        self.assertGreater(len(allocations), 0)
+        bw_resource_in_alloc = False
+        for rp, resources in allocations.items():
+            if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+                self.assertEqual(
+                    min_kbps,
+                    resources['resources'][self.INGRESS_RESOURCE_CLASS])
+                bw_resource_in_alloc = True
+                allocation_rp = rp
+        if min_kbps:
+            self.assertTrue(bw_resource_in_alloc)
+
+            # Check that the port's binding_profile is not empty and matches
+            # the resource provider uuid
+            for port_id in port_ids:
+                port = self.os_admin.ports_client.show_port(port_id)
+                self.assertEqual(
+                    allocation_rp,
+                    port['port']['binding:profile']['allocation'])
+
     @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
-    @decorators.attr(type='slow')
     @utils.services('compute', 'network')
     def test_qos_min_bw_allocation_basic(self):
         """"Basic scenario with QoS min bw allocation in placement.
@@ -158,38 +218,270 @@
         * Create port with invalid QoS policy, and try to boot VM with that,
         it should fail.
         """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server1, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server1['id'],
+                                               [valid_port['id']])
 
-        self._check_if_allocation_is_possible()
-
-        self._create_network_and_qos_policies()
-
-        valid_port = self.create_port(
-            self.prov_network['id'], qos_policy_id=self.qos_policy_valid['id'])
-
-        server1 = self.create_server(
-            networks=[{'port': valid_port['id']}])
-        allocations = self.placement_client.list_allocations(server1['id'])
-
-        self.assertGreater(len(allocations['allocations']), 0)
-        bw_resource_in_alloc = False
-        for rp, resources in allocations['allocations'].items():
-            if self.INGRESS_RESOURCE_CLASS in resources['resources']:
-                bw_resource_in_alloc = True
-        self.assertTrue(bw_resource_in_alloc)
-
-        # boot another vm with max int bandwidth
-        not_valid_port = self.create_port(
-            self.prov_network['id'],
-            qos_policy_id=self.qos_policy_not_valid['id'])
-        server2 = self.create_server(
-            wait_until=None,
-            networks=[{'port': not_valid_port['id']}])
-        waiters.wait_for_server_status(
-            client=self.os_primary.servers_client, server_id=server2['id'],
-            status='ERROR', ready_wait=False, raise_on_error=False)
+        server2, not_valid_port = self._boot_vm_with_min_bw(
+            self.qos_policy_not_valid['id'], status='ERROR')
         allocations = self.placement_client.list_allocations(server2['id'])
 
         self.assertEqual(0, len(allocations['allocations']))
         server2 = self.servers_client.show_server(server2['id'])
         self.assertIn('fault', server2['server'])
         self.assertIn('No valid host', server2['server']['fault']['message'])
+        # Check that binding_profile of the port is empty
+        port = self.os_admin.ports_client.show_port(not_valid_port['id'])
+        self.assertEqual(0, len(port['port']['binding:profile']))
+
+    @decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada')
+    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+                          'Cold migration is not available.')
+    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+                          'Less than 2 compute nodes, skipping multinode '
+                          'tests.')
+    @utils.services('compute', 'network')
+    def test_migrate_with_qos_min_bw_allocation(self):
+        """Scenario to migrate VM with QoS min bw allocation in placement
+
+        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+        checks, and
+        * migrate the server
+        * confirm the resize, if the VM state is VERIFY_RESIZE
+        * If the VM goes to ACTIVE state check that allocations are as
+        expected.
+        """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.servers_client.migrate_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        # TODO(lajoskatona): Check that the allocations are ok for the
+        #  migration?
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.servers_client.confirm_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+    @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
+    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+                          'Resize not available.')
+    @utils.services('compute', 'network')
+    def test_resize_with_qos_min_bw_allocation(self):
+        """Scenario to resize VM with QoS min bw allocation in placement.
+
+        Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+        checks, and
+        * resize the server with new flavor
+        * confirm the resize, if the VM state is VERIFY_RESIZE
+        * If the VM goes to ACTIVE state check that allocations are as
+        expected.
+        """
+        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        server, valid_port = self._boot_vm_with_min_bw(
+            qos_policy_id=self.qos_policy_valid['id'])
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        old_flavor = self.flavors_client.show_flavor(
+            CONF.compute.flavor_ref)['flavor']
+        new_flavor = self.flavors_client.create_flavor(**{
+            'ram': old_flavor['ram'],
+            'vcpus': old_flavor['vcpus'],
+            'name': old_flavor['name'] + 'extra',
+            'disk': old_flavor['disk'] + 1
+        })['flavor']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.flavors_client.delete_flavor, new_flavor['id'])
+
+        self.servers_client.resize_server(
+            server_id=server['id'], flavor_ref=new_flavor['id'])
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+        # TODO(lajoskatona): Check that the allocations are ok for the
+        #  migration?
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+        self.servers_client.confirm_resize_server(server_id=server['id'])
+        waiters.wait_for_server_status(
+            client=self.os_primary.servers_client, server_id=server['id'],
+            status='ACTIVE', ready_wait=False, raise_on_error=True)
+        self._assert_allocation_is_as_expected(server['id'],
+                                               [valid_port['id']])
+
+    @decorators.idempotent_id('79fdaa1c-df62-4738-a0f0-1cff9dc415f6')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy(self):
+        """Test the update of QoS policy on bound port
+
+        Related RFE in neutron: #1882804
+        The scenario is the following:
+        * Have a port with QoS policy and minimum bandwidth rule.
+        * Boot a VM with the port.
+        * Update the port with a new policy with different minimum bandwidth
+        values.
+        * The allocation on the Placement side should reflect the new
+        rules.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_2)
+
+        # Switch back to the original policy
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': self.qos_policy_1['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        # Updating to a policy that cannot be satisfied should be rejected
+        self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+            name_prefix='test_policy_not_valid',
+            min_kbps=self.PLACEMENT_MAX_INT)
+        port_orig = self.ports_client.show_port(port['id'])['port']
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.ports_client.update_port,
+            port['id'], **{'qos_policy_id': self.qos_policy_not_valid['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        port_upd = self.ports_client.show_port(port['id'])['port']
+        self.assertEqual(port_orig['qos_policy_id'],
+                         port_upd['qos_policy_id'])
+        self.assertEqual(self.qos_policy_1['id'], port_upd['qos_policy_id'])
+
+    @decorators.idempotent_id('9cfc3bb8-f433-4c91-87b6-747cadc8958a')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_from_zero(self):
+        """Test port without QoS policy to have QoS policy
+
+        This scenario checks if updating a port without QoS policy to
+        have QoS policy with minimum_bandwidth rule succeeds only on
+        controlplane, but placement allocation remains 0.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(self.prov_network['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+        self.ports_client.update_port(
+            port['id'], **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+    @decorators.idempotent_id('a9725a70-1d28-4e3b-ae0e-450abc235962')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_policy_to_zero(self):
+        """Test port with QoS policy to remove QoS policy
+
+        In this scenario port with QoS minimum_bandwidth rule update to
+        remove QoS policy results in 0 placement allocation.
+        """
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+
+        self.ports_client.update_port(
+            port['id'],
+            **{'qos_policy_id': None})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+    @decorators.idempotent_id('756ced7f-6f1a-43e7-a851-2fcfc16f3dd7')
+    @utils.services('compute', 'network')
+    def test_qos_min_bw_allocation_update_with_multiple_ports(self):
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port1 = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+        port2 = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_2['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port1['id']}, {'port': port2['id']}])
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port1['id'], port2['id']],
+            self.BANDWIDTH_1 + self.BANDWIDTH_2)
+
+        self.ports_client.update_port(
+            port1['id'],
+            **{'qos_policy_id': self.qos_policy_2['id']})
+        self._assert_allocation_is_as_expected(
+            server1['id'], [port1['id'], port2['id']],
+            2 * self.BANDWIDTH_2)
+
+    @decorators.idempotent_id('0805779e-e03c-44fb-900f-ce97a790653b')
+    @utils.services('compute', 'network')
+    def test_empty_update(self):
+        if not utils.is_network_feature_enabled('update_port_qos'):
+            raise self.skipException("update_port_qos feature is not enabled")
+
+        self._create_network_and_qos_policies(
+            self._create_qos_policies_from_life)
+
+        port = self.create_port(
+            self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+        server1 = self.create_server(
+            networks=[{'port': port['id']}])
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
+        self.ports_client.update_port(
+            port['id'],
+            **{'description': 'foo'})
+        self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+                                               self.BANDWIDTH_1)
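Editor's note (not part of the patch): _assert_allocation_is_as_expected above walks the Placement allocations payload looking for the ingress bandwidth resource class. A minimal sketch of that check; the payload and find_bw_allocation are illustrative, and the resource class string is the one Neutron uses for ingress minimum bandwidth (an assumption, since its definition is outside this hunk):

    INGRESS_RESOURCE_CLASS = 'NET_BW_IGR_KILOBIT_PER_SEC'

    def find_bw_allocation(allocations, expected_kbps):
        # Scan each resource provider's resources for the bandwidth class and
        # return the provider that holds the expected allocation.
        for rp_uuid, resources in allocations.items():
            amount = resources['resources'].get(INGRESS_RESOURCE_CLASS)
            if amount is not None:
                assert amount == expected_kbps
                return rp_uuid
        return None

    allocations = {
        'rp-uuid-1': {'resources': {'VCPU': 1, 'MEMORY_MB': 512}},
        'rp-uuid-2': {'resources': {INGRESS_RESOURCE_CLASS: 1000}},
    }
    print(find_bw_allocation(allocations, 1000))  # -> rp-uuid-2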
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index fe42583..2c981c8 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -78,7 +78,7 @@
         self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
 
     def create_and_add_security_group_to_server(self, server):
-        secgroup = self._create_security_group()
+        secgroup = self.create_security_group()
         self.servers_client.add_security_group(server['id'],
                                                name=secgroup['name'])
         self.addCleanup(self.servers_client.remove_security_group,
@@ -100,7 +100,7 @@
         for addresses in server['addresses'].values():
             for address in addresses:
                 if (address['OS-EXT-IPS:type'] == 'floating' and
-                        address['addr'] == floating_ip['ip']):
+                        address['addr'] == floating_ip['floating_ip_address']):
                     return address
 
     @decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
@@ -129,7 +129,9 @@
         server = self.servers_client.show_server(server['id'])['server']
         if (CONF.network_feature_enabled.floating_ips and
             CONF.network.floating_network_name):
-            floating_ip = self.create_floating_ip(server)
+            fip = self.create_floating_ip(server)
+            floating_ip = self.associate_floating_ip(
+                fip, server)
             # fetch the server again to make sure the addresses were refreshed
             # after associating the floating IP
             server = self.servers_client.show_server(server['id'])['server']
@@ -138,8 +140,8 @@
             self.assertIsNotNone(
                 address,
                 "Failed to find floating IP '%s' in server addresses: %s" %
-                (floating_ip['ip'], server['addresses']))
-            ssh_ip = floating_ip['ip']
+                (floating_ip['floating_ip_address'], server['addresses']))
+            ssh_ip = floating_ip['floating_ip_address']
         else:
             ssh_ip = self.get_server_ip(server)
 
@@ -162,8 +164,7 @@
 
         if floating_ip:
             # delete the floating IP, this should refresh the server addresses
-            self.compute_floating_ips_client.delete_floating_ip(
-                floating_ip['id'])
+            self.disassociate_floating_ip(floating_ip)
 
             def is_floating_ip_detached_from_server():
                 server_info = self.servers_client.show_server(
@@ -177,5 +178,6 @@
                 CONF.compute.build_timeout,
                 CONF.compute.build_interval):
                 msg = ("Floating IP '%s' should not be in server addresses: %s"
-                       % (floating_ip['ip'], server['addresses']))
+                       % (floating_ip['floating_ip_address'],
+                          server['addresses']))
                 raise exceptions.TimeoutException(msg)
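Editor's note (not part of the patch): the change above switches to the Neutron-style floating IP body, so the server address lookup matches on 'floating_ip_address' instead of the old nova-network 'ip' key. A minimal sketch of that lookup with illustrative data:

    def find_floating_address(server_addresses, floating_ip):
        # Look for the floating entry whose address matches the Neutron-style
        # floating IP body.
        for addresses in server_addresses.values():
            for address in addresses:
                if (address['OS-EXT-IPS:type'] == 'floating' and
                        address['addr'] == floating_ip['floating_ip_address']):
                    return address
        return None

    server_addresses = {'private': [
        {'addr': '10.0.0.5', 'OS-EXT-IPS:type': 'fixed'},
        {'addr': '172.24.4.10', 'OS-EXT-IPS:type': 'floating'},
    ]}
    print(find_floating_address(server_addresses,
                                {'floating_ip_address': '172.24.4.10'}))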
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index e26dc9d..b48ac3c 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -60,9 +60,9 @@
     def _setup_server(self, keypair):
         security_groups = []
         if utils.is_extension_enabled('security-group', 'network'):
-            security_group = self._create_security_group()
+            security_group = self.create_security_group()
             security_groups = [{'name': security_group['name']}]
-        network, _, _ = self.create_networks()
+        network, _, _ = self.setup_network_subnet_with_router()
         server = self.create_server(
             networks=[{'uuid': network['id']}],
             key_name=keypair['name'],
@@ -80,8 +80,8 @@
         return floating_ip
 
     def _check_network_connectivity(self, server, keypair, floating_ip,
-                                    should_connect=True):
-        username = CONF.validation.image_ssh_user
+                                    should_connect=True,
+                                    username=CONF.validation.image_ssh_user):
         private_key = keypair['private_key']
         self.check_tenant_network_connectivity(
             server, username, private_key,
@@ -95,12 +95,13 @@
                                    'Public network connectivity check failed',
                                    server)
 
-    def _wait_server_status_and_check_network_connectivity(self, server,
-                                                           keypair,
-                                                           floating_ip):
+    def _wait_server_status_and_check_network_connectivity(
+        self, server, keypair, floating_ip,
+        username=CONF.validation.image_ssh_user):
         waiters.wait_for_server_status(self.servers_client, server['id'],
                                        'ACTIVE')
-        self._check_network_connectivity(server, keypair, floating_ip)
+        self._check_network_connectivity(server, keypair, floating_ip,
+                                         username=username)
 
     @decorators.idempotent_id('61f1aa9a-1573-410e-9054-afa557cab021')
     @decorators.attr(type='slow')
@@ -137,10 +138,11 @@
         server = self._setup_server(keypair)
         floating_ip = self._setup_network(server, keypair)
         image_ref_alt = CONF.compute.image_ref_alt
+        username_alt = CONF.validation.image_alt_ssh_user
         self.servers_client.rebuild_server(server['id'],
                                            image_ref=image_ref_alt)
         self._wait_server_status_and_check_network_connectivity(
-            server, keypair, floating_ip)
+            server, keypair, floating_ip, username_alt)
 
     @decorators.idempotent_id('2b2642db-6568-4b35-b812-eceed3fa20ce')
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 6c1b3fa..add5c32 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -106,7 +106,8 @@
 
     def _setup_network_and_servers(self, **kwargs):
         boot_with_port = kwargs.pop('boot_with_port', False)
-        self.network, self.subnet, self.router = self.create_networks(**kwargs)
+        self.network, self.subnet, self.router = (
+            self.setup_network_subnet_with_router(**kwargs))
         self.check_networks()
 
         self.ports = []
@@ -159,7 +160,7 @@
         keypair = self.create_keypair()
         self.keypairs[keypair['name']] = keypair
         security_groups = [
-            {'name': self._create_security_group()['name']}
+            {'name': self.create_security_group()['name']}
         ]
         network = {'uuid': network['id']}
         if port_id is not None:
@@ -223,14 +224,14 @@
         floating_ip, server = self.floating_ip_tuple
         # create a new server for the floating ip
         server = self._create_server(self.network)
-        port_id, _ = self._get_server_port_id_and_ip4(server)
+        port_id, _ = self.get_server_port_id_and_ip4(server)
         floating_ip = self.floating_ips_client.update_floatingip(
             floating_ip['id'], port_id=port_id)['floatingip']
         self.assertEqual(port_id, floating_ip['port_id'])
         self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
 
     def _create_new_network(self, create_gateway=False):
-        self.new_net = self._create_network()
+        self.new_net = self.create_network()
         if create_gateway:
             self.new_subnet = self.create_subnet(
                 network=self.new_net)
@@ -623,6 +624,13 @@
         ssh_client = self.get_remote_client(
             ip_address, private_key=private_key, server=server)
 
+        # NOTE: The server needs to renew its DHCP lease in order to pick up
+        # the new settings from the subnet.
+        # NOTE(amuller): we are renewing the lease as part of the retry
+        # because Neutron updates dnsmasq asynchronously after the
+        # subnet-update API call returns.
+        ssh_client.renew_lease(fixed_ip=floating_ip['fixed_ip_address'],
+                               dhcp_client=CONF.scenario.dhcp_client)
         dns_servers = [initial_dns_server]
         servers = ssh_client.get_dns_servers()
         self.assertEqual(set(dns_servers), set(servers),
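Editor's note (not part of the patch): the NOTE above explains that Neutron updates dnsmasq asynchronously, so the DNS check has to poll (renewing the guest's DHCP lease between attempts) rather than assert once. A minimal sketch of such a polling loop; wait_for_dns_servers and the stub getter are illustrative stand-ins for ssh_client.get_dns_servers():

    import time

    def wait_for_dns_servers(get_dns_servers, expected, timeout=10, interval=1):
        # Poll until the observed DNS servers match the expected set, or the
        # timeout expires; the real test renews the DHCP lease each round.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if set(get_dns_servers()) == set(expected):
                return True
            time.sleep(interval)
        return False

    observed = iter([['8.8.8.8'], ['8.8.8.8'], ['1.1.1.1']])
    print(wait_for_dns_servers(lambda: next(observed), ['1.1.1.1']))  # -> True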
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index 14f24c7..4f5118b 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -66,7 +66,7 @@
     def setUp(self):
         super(TestGettingAddress, self).setUp()
         self.keypair = self.create_keypair()
-        self.sec_grp = self._create_security_group()
+        self.sec_grp = self.create_security_group()
 
     def prepare_network(self, address6_mode, n_subnets6=1, dualnet=False):
         """Prepare network
@@ -77,15 +77,15 @@
         if dualnet - create IPv6 subnets on a different network
         :return: list of created networks
         """
-        network = self._create_network()
+        network = self.create_network()
         if dualnet:
-            network_v6 = self._create_network()
+            network_v6 = self.create_network()
 
         sub4 = self.create_subnet(network=network,
                                   namestart='sub4',
                                   ip_version=4)
 
-        router = self._get_router()
+        router = self.get_router()
         self.routers_client.add_router_interface(router['id'],
                                                  subnet_id=sub4['id'])
 
@@ -218,7 +218,7 @@
                     guest_has_address,
                     CONF.validation.ping_timeout, 1, ssh, ip)
                 if not result:
-                    self._log_console_output(servers=[srv])
+                    self.log_console_output(servers=[srv])
                     self.fail(
                         'Address %s not configured for instance %s, '
                         'ip address output is\n%s' %
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 3fc93e4..aff7509 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -197,14 +197,14 @@
         tenant.keypair = keypair
 
     def _create_tenant_security_groups(self, tenant):
-        access_sg = self._create_empty_security_group(
+        access_sg = self.create_empty_security_group(
             namestart='secgroup_access-',
             project_id=tenant.creds.project_id,
             client=tenant.manager.security_groups_client
         )
 
         # don't use default secgroup since it allows in-project traffic
-        def_sg = self._create_empty_security_group(
+        def_sg = self.create_empty_security_group(
             namestart='secgroup_general-',
             project_id=tenant.creds.project_id,
             client=tenant.manager.security_groups_client
@@ -217,7 +217,7 @@
             direction='ingress',
         )
         sec_group_rules_client = tenant.manager.security_group_rules_client
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=access_sg,
             sec_group_rules_client=sec_group_rules_client,
             **ssh_rule)
@@ -326,7 +326,7 @@
         self.floating_ips.setdefault(server['id'], floating_ip)
 
     def _create_tenant_network(self, tenant, port_security_enabled=True):
-        network, subnet, router = self.create_networks(
+        network, subnet, router = self.setup_network_subnet_with_router(
             networks_client=tenant.manager.networks_client,
             routers_client=tenant.manager.routers_client,
             subnets_client=tenant.manager.subnets_client,
@@ -385,7 +385,7 @@
             remote_group_id=tenant.security_groups['default']['id'],
             direction='ingress'
         )
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=tenant.security_groups['default'],
             security_groups_client=tenant.manager.security_groups_client,
             **ruleset
@@ -413,7 +413,7 @@
         protocol = ruleset['protocol']
         sec_group_rules_client = (
             dest_tenant.manager.security_group_rules_client)
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=dest_tenant.security_groups['default'],
             sec_group_rules_client=sec_group_rules_client,
             **ruleset
@@ -429,7 +429,7 @@
         # allow reverse traffic and check
         sec_group_rules_client = (
             source_tenant.manager.security_group_rules_client)
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=source_tenant.security_groups['default'],
             sec_group_rules_client=sec_group_rules_client,
             **ruleset
@@ -464,9 +464,9 @@
     def _log_console_output_for_all_tenants(self):
         for tenant in self.tenants.values():
             client = tenant.manager.servers_client
-            self._log_console_output(servers=tenant.servers, client=client)
+            self.log_console_output(servers=tenant.servers, client=client)
             if tenant.access_point is not None:
-                self._log_console_output(
+                self.log_console_output(
                     servers=[tenant.access_point], client=client)
 
     def _create_protocol_ruleset(self, protocol, port=80):
@@ -534,7 +534,7 @@
         new_tenant = self.primary_tenant
 
         # Create empty security group and add icmp rule in it
-        new_sg = self._create_empty_security_group(
+        new_sg = self.create_empty_security_group(
             namestart='secgroup_new-',
             project_id=new_tenant.creds.project_id,
             client=new_tenant.manager.security_groups_client)
@@ -543,7 +543,7 @@
             direction='ingress',
         )
         sec_group_rules_client = new_tenant.manager.security_group_rules_client
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=new_sg,
             sec_group_rules_client=sec_group_rules_client,
             **icmp_rule)
@@ -596,7 +596,7 @@
             protocol='icmp',
             direction='ingress'
         )
-        self._create_security_group_rule(
+        self.create_security_group_rule(
             secgroup=tenant.security_groups['default'],
             **ruleset
         )
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 8aa729b..990b325 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -37,7 +37,7 @@
 
     @classmethod
     def setup_credentials(cls):
-        cls.set_network_resources()
+        cls.set_network_resources(network=True, subnet=True)
         super(TestServerAdvancedOps, cls).setup_credentials()
 
     @decorators.attr(type='slow')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 02bc692..2a15470 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -52,7 +52,9 @@
             # Obtain a floating IP if floating_ips is enabled
             if (CONF.network_feature_enabled.floating_ips and
                 CONF.network.floating_network_name):
-                self.ip = self.create_floating_ip(self.instance)['ip']
+                fip = self.create_floating_ip(self.instance)
+                self.ip = self.associate_floating_ip(
+                    fip, self.instance)['floating_ip_address']
             else:
                 server = self.servers_client.show_server(
                     self.instance['id'])['server']
@@ -67,7 +69,10 @@
     def verify_metadata(self):
         if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
             # Verify metadata service
-            md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
+            if CONF.network.public_network_id:
+                md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
+            else:
+                md_url = 'http://169.254.169.254/latest/meta-data/local-ipv4'
 
             def exec_cmd_and_verify_output():
                 cmd = 'curl ' + md_url
@@ -125,7 +130,7 @@
     @utils.services('compute', 'network')
     def test_server_basic_ops(self):
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
         self.md = {'meta1': 'data1', 'meta2': 'data2', 'metaN': 'dataN'}
         self.instance = self.create_server(
             key_name=keypair['name'],
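
One of the hunks above in test_server_basic_ops.py makes the metadata check fall back to the local-ipv4 entry when no public network is configured. A standalone sketch of that selection rule; the helper name metadata_url is illustrative, not part of tempest:

def metadata_url(public_network_id):
    # Query the EC2-style metadata service; use the public address only
    # when a public network is actually configured.
    base = 'http://169.254.169.254/latest/meta-data/'
    leaf = 'public-ipv4' if public_network_id else 'local-ipv4'
    return base + leaf


print(metadata_url('a-public-net-uuid'))  # .../public-ipv4
print(metadata_url(None))                 # .../local-ipv4
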
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index d6b6d14..29612ec 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -33,9 +33,18 @@
      * shelve the instance
      * unshelve the instance
      * check the existence of the timestamp file in the unshelved instance
+     * check the existence of the timestamp file in the unshelved instance,
+       after a cold migration
 
     """
 
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(TestShelveInstance, cls).setup_clients()
+        cls.admin_servers_client = cls.os_admin.servers_client
+
     @classmethod
     def skip_checks(cls):
         super(TestShelveInstance, cls).skip_checks()
@@ -50,10 +59,24 @@
         waiters.wait_for_server_status(self.servers_client, server['id'],
                                        'ACTIVE')
 
-    def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False):
+    def _cold_migrate_server(self, server):
+        src_host = self.get_host_for_server(server['id'])
+
+        self.admin_servers_client.migrate_server(server['id'])
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'VERIFY_RESIZE')
+        self.servers_client.confirm_resize_server(server['id'])
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
+
+        dst_host = self.get_host_for_server(server['id'])
+        self.assertNotEqual(src_host, dst_host)
+
+    def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False,
+                                                cold_migrate=False):
         keypair = self.create_keypair()
 
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
         security_groups = [{'name': security_group['name']}]
 
         server = self.create_server(
@@ -71,6 +94,10 @@
         # with the instance snapshot
         self._shelve_then_unshelve_server(server)
 
+        if cold_migrate:
+            # Prevent bug #1732428 from coming back
+            self._cold_migrate_server(server)
+
         timestamp2 = self.get_timestamp(instance_ip,
                                         private_key=keypair['private_key'],
                                         server=server)
@@ -91,3 +118,18 @@
     @utils.services('compute', 'volume', 'network', 'image')
     def test_shelve_volume_backed_instance(self):
         self._create_server_then_shelve_and_unshelve(boot_from_volume=True)
+
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('1295fd9e-193a-4cf8-b211-55358e021bae')
+    @testtools.skipUnless(CONF.network.public_network_id,
+                          'The public_network_id option must be specified.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+                          'Cold migration not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.shelve_migrate,
+                          'Shelve migrate not available.')
+    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+                          'Less than 2 compute nodes, skipping multinode '
+                          'tests.')
+    @utils.services('compute', 'network', 'image')
+    def test_cold_migrate_unshelved_instance(self):
+        self._create_server_then_shelve_and_unshelve(cold_migrate=True)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index a062d40..d04cb9a 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -50,7 +50,7 @@
     def test_snapshot_pattern(self):
         # prepare for booting an instance
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # boot an instance and create a timestamp file in it
         server = self.create_server(
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c3b3670..4b81b9e 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from oslo_log import log as logging
 import testtools
 
 from tempest.common import utils
@@ -24,7 +23,6 @@
 from tempest.scenario import manager
 
 CONF = config.CONF
-LOG = logging.getLogger(__name__)
 
 
 class TestStampPattern(manager.ScenarioTest):
@@ -83,7 +81,7 @@
     def test_stamp_pattern(self):
         # prepare for booting an instance
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # boot an instance and create a timestamp file in it
         volume = self.create_volume()
diff --git a/tempest/scenario/test_volume_backup_restore.py b/tempest/scenario/test_volume_backup_restore.py
index 8a8c54e..d0885cf 100644
--- a/tempest/scenario/test_volume_backup_restore.py
+++ b/tempest/scenario/test_volume_backup_restore.py
@@ -70,7 +70,7 @@
 
         # Create keypair and security group
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # Boot a server from the restored backup
         bd_map_v2 = [{
@@ -84,11 +84,11 @@
                                     security_groups=[
                                         {'name': security_group['name']}])
 
-        # Create a floating ip
-        floating_ip = self.create_floating_ip(server)
-
+        # Create a floating ip and associate it with the server.
+        fip = self.create_floating_ip(server)
+        floating_ip = self.associate_floating_ip(fip, server)
         # Check server connectivity
-        self.check_vm_connectivity(floating_ip['ip'],
+        self.check_vm_connectivity(floating_ip['floating_ip_address'],
                                    username=CONF.validation.image_ssh_user,
                                    private_key=keypair['private_key'],
                                    should_connect=True)
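
The scenario changes above split floating IP handling into an explicit create step followed by an associate step, and read the address from 'floating_ip_address' instead of the old 'ip' key. A minimal sketch of the new pattern, assuming the scenario manager helpers used in this diff (create_floating_ip, associate_floating_ip, check_vm_connectivity); it is illustrative only, not a complete test:

from tempest import config
from tempest.scenario import manager

CONF = config.CONF


class FloatingIpPatternSketch(manager.ScenarioTest):
    """Illustrative only: shows the create-then-associate flow."""

    def _reach_server_over_floating_ip(self, server, keypair):
        # Step 1: allocate the floating IP; step 2: associate it with
        # the server. The address lives under 'floating_ip_address'.
        fip = self.create_floating_ip(server)
        fip = self.associate_floating_ip(fip, server)
        self.check_vm_connectivity(
            fip['floating_ip_address'],
            username=CONF.validation.image_ssh_user,
            private_key=keypair['private_key'],
            should_connect=True)
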
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 3b4bbda..5a5cc27 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -64,7 +64,7 @@
 
         LOG.info("Creating keypair and security group")
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # create an instance from volume
         LOG.info("Booting instance 1 from volume")
diff --git a/tempest/scenario/test_volume_migrate_attached.py b/tempest/scenario/test_volume_migrate_attached.py
index 106500e..57d2a1a 100644
--- a/tempest/scenario/test_volume_migrate_attached.py
+++ b/tempest/scenario/test_volume_migrate_attached.py
@@ -100,7 +100,7 @@
     def test_volume_retype_attached(self):
         LOG.info("Creating keypair and security group")
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         # create volume types
         LOG.info("Creating Volume types")
@@ -156,7 +156,7 @@
     def test_volume_migrate_attached(self):
         LOG.info("Creating keypair and security group")
         keypair = self.create_keypair()
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
 
         LOG.info("Creating volume")
         # Create a unique volume type to avoid using the backend default
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 9fec548..0d7720e 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -15,9 +15,9 @@
 
 import re
 import time
+from urllib import parse as urllib
 
 from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
 
 from tempest import exceptions
 from tempest.lib.common import rest_client
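
The orchestration client now imports urllib.parse from the standard library instead of six.moves. A standalone illustration of the equivalent calls on Python 3:

from urllib import parse as urllib

# urlencode/quote behave the same as the six.moves aliases did.
print(urllib.urlencode({'marker': 'stack-1', 'limit': 20}))
# -> marker=stack-1&limit=20
print(urllib.quote('a stack/1'))
# -> a%20stack/1
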
diff --git a/tempest/test.py b/tempest/test.py
index f383bc1..8ea3b16 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -20,7 +20,6 @@
 import debtcollector.moves
 import fixtures
 from oslo_log import log as logging
-import six
 import testtools
 
 from tempest import clients
@@ -38,12 +37,6 @@
 
 CONF = config.CONF
 
-# TODO(oomichi): This test.idempotent_id should be removed after all projects
-# switch to use decorators.idempotent_id.
-idempotent_id = debtcollector.moves.moved_function(
-    decorators.idempotent_id, 'idempotent_id', __name__,
-    version='Mitaka', removal_version='?')
-
 
 attr = debtcollector.moves.moved_function(
     decorators.attr, 'attr', __name__,
@@ -185,7 +178,7 @@
                      etype, cls.__name__)
             cls.tearDownClass()
             try:
-                six.reraise(etype, value, trace)
+                raise value.with_traceback(trace)
             finally:
                 del trace  # to avoid circular refs
         finally:
@@ -239,7 +232,7 @@
         # the first one
         if re_raise and etype is not None:
             try:
-                six.reraise(etype, value, trace)
+                raise value.with_traceback(trace)
             finally:
                 del trace  # to avoid circular refs
 
@@ -302,6 +295,7 @@
         identity_version = cls.get_identity_version()
         # setting force_tenant_isolation to True also needs admin credentials.
         if ('admin' in cls.credentials or
+                'alt_admin' in cls.credentials or
                 getattr(cls, 'force_tenant_isolation', False)):
             if not credentials.is_admin_available(
                     identity_version=identity_version):
@@ -389,7 +383,7 @@
             # This may raise an exception in case credentials are not available
             # In that case we want to let the exception through and the test
             # fail accordingly
-            if isinstance(credentials_type, six.string_types):
+            if isinstance(credentials_type, str):
                 manager = cls.get_client_manager(
                     credential_type=credentials_type)
                 setattr(cls, 'os_%s' % credentials_type, manager)
@@ -420,8 +414,18 @@
                             'alt_manager', 'os_alt', version='Pike',
                             removal_version='Queens')
             elif isinstance(credentials_type, list):
+                scope = 'project'
+                if credentials_type[0].startswith('system'):
+                    scope = 'system'
+                elif credentials_type[0].startswith('domain'):
+                    scope = 'domain'
                 manager = cls.get_client_manager(roles=credentials_type[1:],
-                                                 force_new=True)
+                                                 force_new=True,
+                                                 scope=scope)
+                setattr(cls, 'os_%s' % credentials_type[0], manager)
+                # TODO(gmann): Set the old-style attribute too for
+                # backward compatibility; this should be removed at
+                # some point.
                 setattr(cls, 'os_roles_%s' % credentials_type[0], manager)
 
     @classmethod
@@ -663,7 +667,7 @@
 
     @classmethod
     def get_client_manager(cls, credential_type=None, roles=None,
-                           force_new=None):
+                           force_new=None, scope=None):
         """Returns an OpenStack client manager
 
         Returns an OpenStack client manager based on either credential_type
@@ -671,6 +675,7 @@
         credential_type 'primary'
         :param credential_type: string - primary, alt or admin
         :param roles: list of roles
+        :param scope: scope for the test user: 'project', 'domain' or 'system'
 
         :returns: the created client manager
         :raises skipException: if the requested credentials are not available
@@ -689,7 +694,7 @@
                         " is not able to provide credentials with the %s role "
                         "assigned." % (cls.__name__, role))
                     raise cls.skipException(skip_msg)
-            params = dict(roles=roles)
+            params = dict(roles=roles, scope=scope)
             if force_new is not None:
                 params.update(force_new=force_new)
             creds = cred_provider.get_creds_by_roles(**params)
@@ -853,10 +858,16 @@
         """
         # Get a manager for the given credentials_type, but at least
         # always fall back on getting the manager for primary credentials
-        if isinstance(credentials_type, six.string_types):
+        if isinstance(credentials_type, str):
             manager = cls.get_client_manager(credential_type=credentials_type)
         elif isinstance(credentials_type, list):
-            manager = cls.get_client_manager(roles=credentials_type[1:])
+            scope = 'project'
+            if credentials_type[0].startswith('system'):
+                scope = 'system'
+            elif credentials_type[0].startswith('domain'):
+                scope = 'domain'
+            manager = cls.get_client_manager(roles=credentials_type[1:],
+                                             scope=scope)
         else:
             manager = cls.get_client_manager()
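
The two hunks above derive a keystone scope from the prefix of the first element of a role-based credentials entry. A standalone sketch of that rule; derive_scope and the example role names are illustrative, not tempest APIs:

def derive_scope(credentials_entry):
    # ['system_admin', 'admin']   -> 'system'
    # ['domain_member', 'member'] -> 'domain'
    # anything else               -> 'project'
    name = credentials_entry[0]
    if name.startswith('system'):
        return 'system'
    if name.startswith('domain'):
        return 'domain'
    return 'project'


assert derive_scope(['system_admin', 'admin']) == 'system'
assert derive_scope(['domain_member', 'member']) == 'domain'
assert derive_scope(['my_role', 'member']) == 'project'
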
 
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index b20b60e..1d69d9d 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -15,7 +15,6 @@
 import abc
 
 from oslo_log import log as logging
-import six
 import stevedore
 
 from tempest.lib.common.utils import misc
@@ -24,8 +23,7 @@
 LOG = logging.getLogger(__name__)
 
 
-@six.add_metaclass(abc.ABCMeta)
-class TempestPlugin(object):
+class TempestPlugin(object, metaclass=abc.ABCMeta):
     """Provide basic hooks for an external plugin
 
     To provide tempest the necessary information to run the plugin.
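
The plugin base class above now uses the Python 3 metaclass keyword instead of six.add_metaclass. A standalone illustration of the two spellings with an abstract base; the class and method names here are illustrative, not tempest's plugin interface:

import abc


# Python 3 spelling, as used above:
#     class TempestPlugin(object, metaclass=abc.ABCMeta)
# replaces the six form:
#     @six.add_metaclass(abc.ABCMeta)
#     class TempestPlugin(object)
class PluginBase(metaclass=abc.ABCMeta):

    @abc.abstractmethod
    def load_tests(self):
        """Return the test location information for this plugin."""


class ExamplePlugin(PluginBase):

    def load_tests(self):
        return 'example/tests', 'example'


print(ExamplePlugin().load_tests())
# PluginBase() would raise TypeError because load_tests is abstract.
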
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 143c6e1..5816ab1 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -30,8 +30,8 @@
     base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
     base_path = os.path.split(base_path)[0]
     # Load local tempest tests
-    for test_dir in ['tempest/api', 'tempest/scenario']:
-        full_test_dir = os.path.join(base_path, test_dir)
+    for test_dir in ['api', 'scenario']:
+        full_test_dir = os.path.join(base_path, 'tempest', test_dir)
         if not pattern:
             suite.addTests(loader.discover(full_test_dir,
                                            top_level_dir=base_path))
diff --git a/tempest/tests/api/compute/test_base.py b/tempest/tests/api/compute/test_base.py
index 74d2625..8a1873b 100644
--- a/tempest/tests/api/compute/test_base.py
+++ b/tempest/tests/api/compute/test_base.py
@@ -15,7 +15,6 @@
 from unittest import mock
 
 from oslo_utils import uuidutils
-import six
 
 from tempest.api.compute import base as compute_base
 from tempest.common import waiters
@@ -128,9 +127,9 @@
             mock.sentinel.server_id, wait_until='active')
         # make our assertions
         if fault:
-            self.assertIn(fault, six.text_type(ex))
+            self.assertIn(fault, str(ex))
         else:
-            self.assertNotIn(fault, six.text_type(ex))
+            self.assertNotIn(fault, str(ex))
         if compute_base.BaseV2ComputeTest.is_requested_microversion_compatible(
             '2.35'):
             status = 'ACTIVE'
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index fc44793..2301be6 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -143,40 +143,40 @@
 
     saved_state = {
         # Static list to ensure global service saved items are not deleted
-        "users": {u'32rwef64245tgr20121qw324bgg': u'Lightning'},
-        "flavors": {u'42': u'm1.tiny'},
-        "images": {u'34yhwr-4t3q': u'stratus-0.3.2-x86_64-disk'},
-        "roles": {u'3efrt74r45hn': u'president'},
-        "projects": {u'f38ohgp93jj032': u'manhattan'},
-        "domains": {u'default': u'Default'},
+        "users": {'32rwef64245tgr20121qw324bgg': 'Lightning'},
+        "flavors": {'42': 'm1.tiny'},
+        "images": {'34yhwr-4t3q': 'stratus-0.3.2-x86_64-disk'},
+        "roles": {'3efrt74r45hn': 'president'},
+        "projects": {'f38ohgp93jj032': 'manhattan'},
+        "domains": {'default': 'Default'},
         # Static list to ensure project service saved items are not deleted
-        "snapshots": {u'1ad4c789-7e8w-4dwg-afc5': u'saved-snapshot'},
-        "servers": {u'7a6d4v7w-36ds-4216': u'saved-server'},
-        "server_groups": {u'as6d5f7g-46ca-475e': u'saved-server-group'},
-        "keypairs": {u'saved-key-pair': {
-            u'fingerprint': u'7e:eb:ab:24',
-            u'name': u'saved-key-pair'
+        "snapshots": {'1ad4c789-7e8w-4dwg-afc5': 'saved-snapshot'},
+        "servers": {'7a6d4v7w-36ds-4216': 'saved-server'},
+        "server_groups": {'as6d5f7g-46ca-475e': 'saved-server-group'},
+        "keypairs": {'saved-key-pair': {
+            'fingerprint': '7e:eb:ab:24',
+            'name': 'saved-key-pair'
         }},
-        "volumes": {u'aa77asdf-1234': u'saved-volume'},
-        "networks": {u'6722fc13-4319': {
-            u'id': u'6722fc13-4319',
-            u'name': u'saved-network'
+        "volumes": {'aa77asdf-1234': 'saved-volume'},
+        "networks": {'6722fc13-4319': {
+            'id': '6722fc13-4319',
+            'name': 'saved-network'
         }},
-        "floatingips": {u'9e82d248-408a': {
-            u'id': u'9e82d248-408a',
-            u'status': u'ACTIVE'
+        "floatingips": {'9e82d248-408a': {
+            'id': '9e82d248-408a',
+            'status': 'ACTIVE'
         }},
-        "routers": {u'4s5w34hj-id44': u'saved-router'},
-        "metering_label_rules": {u'93a973ce-4dc5': {
-            u'direction': u'ingress',
-            u'id': u'93a973ce-4dc5'
+        "routers": {'4s5w34hj-id44': 'saved-router'},
+        "metering_label_rules": {'93a973ce-4dc5': {
+            'direction': 'ingress',
+            'id': '93a973ce-4dc5'
         }},
-        "metering_labels": {u'723b346ce866-4c7q': u'saved-label'},
-        "ports": {u'aa74aa4v-741a': u'saved-port'},
-        "security_groups": {u'7q844add-3697': u'saved-sec-group'},
-        "subnets": {u'55ttda4a-2584': u'saved-subnet'},
-        "subnetpools": {u'8acf64c1-43fc': u'saved-subnet-pool'},
-        "regions": {u'RegionOne': {}}
+        "metering_labels": {'723b346ce866-4c7q': 'saved-label'},
+        "ports": {'aa74aa4v-741a': 'saved-port'},
+        "security_groups": {'7q844add-3697': 'saved-sec-group'},
+        "subnets": {'55ttda4a-2584': 'saved-subnet'},
+        "subnetpools": {'8acf64c1-43fc': 'saved-subnet-pool'},
+        "regions": {'RegionOne': {}}
     }
     # Mocked methods
     get_method = 'tempest.lib.common.rest_client.RestClient.get'
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 5d9ddfa..3b5e901 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -21,7 +21,6 @@
 from unittest import mock
 
 import fixtures
-import six
 
 from tempest.cmd import run
 from tempest.cmd import workspace
@@ -29,10 +28,6 @@
 from tempest.lib.common.utils import data_utils
 from tempest.tests import base
 
-if six.PY2:
-    # Python 2 has not FileNotFoundError exception
-    FileNotFoundError = IOError
-
 DEVNULL = open(os.devnull, 'wb')
 atexit.register(DEVNULL.close)
 
@@ -72,6 +67,11 @@
 
 
 class TestRunReturnCode(base.TestCase):
+
+    exclude_regex = '--exclude-regex'
+    exclude_list = '--exclude-list'
+    include_list = '--include-list'
+
     def setUp(self):
         super(TestRunReturnCode, self).setUp()
         # Setup test dirs
@@ -96,6 +96,14 @@
         self.addCleanup(os.chdir, os.path.abspath(os.curdir))
         os.chdir(self.directory)
 
+    def _get_test_list_file(self, content):
+        fd, path = tempfile.mkstemp()
+        self.addCleanup(os.remove, path)
+        test_file = os.fdopen(fd, 'wb', 0)
+        self.addCleanup(test_file.close)
+        test_file.write(content.encode('utf-8'))
+        return path
+
     def assertRunExit(self, cmd, expected):
         p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
@@ -119,19 +127,23 @@
         subprocess.call(['stestr', 'init'])
         self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
 
-    def test_tempest_run_blackregex_failing(self):
-        self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+    def test_tempest_run_exclude_regex_failing(self):
+        self.assertRunExit(['tempest', 'run',
+                            self.exclude_regex, 'failing'], 0)
 
-    def test_tempest_run_blackregex_failing_with_stestr_repository(self):
+    def test_tempest_run_exclude_regex_failing_with_stestr_repository(self):
         subprocess.call(['stestr', 'init'])
-        self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+        self.assertRunExit(['tempest', 'run',
+                            self.exclude_regex, 'failing'], 0)
 
-    def test_tempest_run_blackregex_passing(self):
-        self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+    def test_tempest_run_exclude_regex_passing(self):
+        self.assertRunExit(['tempest', 'run',
+                            self.exclude_regex, 'passing'], 1)
 
-    def test_tempest_run_blackregex_passing_with_stestr_repository(self):
+    def test_tempest_run_exclude_regex_passing_with_stestr_repository(self):
         subprocess.call(['stestr', 'init'])
-        self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+        self.assertRunExit(['tempest', 'run',
+                            self.exclude_regex, 'passing'], 1)
 
     def test_tempest_run_fails(self):
         self.assertRunExit(['tempest', 'run'], 1)
@@ -140,61 +152,44 @@
         subprocess.call(['stestr', 'init'])
         out, err = self.assertRunExit(['tempest', 'run', '-l'], 0)
         tests = out.split()
-        tests = sorted([six.text_type(x.rstrip()) for x in tests if x])
+        tests = sorted([str(x.rstrip()) for x in tests if x])
         result = [
-            six.text_type('tests.test_failing.FakeTestClass.test_pass'),
-            six.text_type('tests.test_failing.FakeTestClass.test_pass_list'),
-            six.text_type('tests.test_passing.FakeTestClass.test_pass'),
-            six.text_type('tests.test_passing.FakeTestClass.test_pass_list'),
+            str('tests.test_failing.FakeTestClass.test_pass'),
+            str('tests.test_failing.FakeTestClass.test_pass_list'),
+            str('tests.test_passing.FakeTestClass.test_pass'),
+            str('tests.test_passing.FakeTestClass.test_pass_list'),
         ]
         # NOTE(mtreinish): on python 3 the subprocess prints b'' around
         # stdout.
-        if six.PY3:
-            result = ["b\'" + x + "\'" for x in result]
+        result = ["b\'" + x + "\'" for x in result]
         self.assertEqual(result, tests)
 
     def test_tempest_run_with_worker_file(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        worker_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(worker_file.close)
-        worker_file.write(
-            '- worker:\n  - passing\n  concurrency: 3'.encode('utf-8'))
+        path = self._get_test_list_file(
+            '- worker:\n  - passing\n  concurrency: 3')
         self.assertRunExit(['tempest', 'run', '--worker-file=%s' % path], 0)
 
-    def test_tempest_run_with_whitelist(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        whitelist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(whitelist_file.close)
-        whitelist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path], 0)
+    def test_tempest_run_with_include_list(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.include_list, path)], 0)
 
-    def test_tempest_run_with_whitelist_regex_include_pass_check_fail(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        whitelist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(whitelist_file.close)
-        whitelist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+    def test_tempest_run_with_include_regex_include_pass_check_fail(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.include_list, path),
                             '--regex', 'fail'], 1)
 
-    def test_tempest_run_with_whitelist_regex_include_pass_check_pass(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        whitelist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(whitelist_file.close)
-        whitelist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+    def test_tempest_run_with_include_regex_include_pass_check_pass(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.include_list, path),
                             '--regex', 'passing'], 0)
 
-    def test_tempest_run_with_whitelist_regex_include_fail_check_pass(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        whitelist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(whitelist_file.close)
-        whitelist_file.write('failing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+    def test_tempest_run_with_include_regex_include_fail_check_pass(self):
+        path = self._get_test_list_file('failing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.include_list, path),
                             '--regex', 'pass'], 1)
 
     def test_tempest_run_passes_with_config_file(self):
@@ -202,50 +197,75 @@
                             '--config-file', self.stestr_conf_file,
                             '--regex', 'passing'], 0)
 
-    def test_tempest_run_with_blacklist_failing(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('failing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 0)
+    def test_tempest_run_with_exclude_list_failing(self):
+        path = self._get_test_list_file('failing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path)], 0)
 
-    def test_tempest_run_with_blacklist_passing(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 1)
+    def test_tempest_run_with_exclude_list_passing(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path)], 1)
 
-    def test_tempest_run_with_blacklist_regex_exclude_fail_check_pass(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('failing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+    def test_tempest_run_with_exclude_list_regex_exclude_fail_check_pass(self):
+        path = self._get_test_list_file('failing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path),
                             '--regex', 'pass'], 0)
 
-    def test_tempest_run_with_blacklist_regex_exclude_pass_check_pass(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+    def test_tempest_run_with_exclude_list_regex_exclude_pass_check_pass(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path),
                             '--regex', 'pass'], 1)
 
-    def test_tempest_run_with_blacklist_regex_exclude_pass_check_fail(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        blacklist_file = os.fdopen(fd, 'wb', 0)
-        self.addCleanup(blacklist_file.close)
-        blacklist_file.write('passing'.encode('utf-8'))
-        self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+    def test_tempest_run_with_exclude_list_regex_exclude_pass_check_fail(self):
+        path = self._get_test_list_file('passing')
+        self.assertRunExit(['tempest', 'run',
+                            '%s=%s' % (self.exclude_list, path),
                             '--regex', 'fail'], 1)
 
 
+class TestOldArgRunReturnCode(TestRunReturnCode):
+    """A class for testing deprecated but still supported args.
+
+    This class will be removed once we remove the following arguments:
+      * --black-regex
+      * --blacklist-file
+      * --whitelist-file
+    """
+    exclude_regex = '--black-regex'
+    exclude_list = '--blacklist-file'
+    include_list = '--whitelist-file'
+
+    def _test_args_passing(self, args):
+        self.assertRunExit(['tempest', 'run'] + args, 0)
+
+    def test_tempest_run_new_old_arg_comb(self):
+        path = self._get_test_list_file('failing')
+        self._test_args_passing(['--black-regex', 'failing',
+                                 '--exclude-regex', 'failing'])
+        self._test_args_passing(['--blacklist-file=' + path,
+                                 '--exclude-list=' + path])
+        path = self._get_test_list_file('passing')
+        self._test_args_passing(['--whitelist-file=' + path,
+                                 '--include-list=' + path])
+
+    def _test_args_passing_with_stestr_repository(self, args):
+        subprocess.call(['stestr', 'init'])
+        self.assertRunExit(['tempest', 'run'] + args, 0)
+
+    def test_tempest_run_new_old_arg_comb_with_stestr_repository(self):
+        path = self._get_test_list_file('failing')
+        self._test_args_passing_with_stestr_repository(
+            ['--black-regex', 'failing', '--exclude-regex', 'failing'])
+        self._test_args_passing_with_stestr_repository(
+            ['--blacklist-file=' + path, '--exclude-list=' + path])
+        path = self._get_test_list_file('passing')
+        self._test_args_passing_with_stestr_repository(
+            ['--whitelist-file=' + path, '--include-list=' + path])
+
+
 class TestConfigPathCheck(base.TestCase):
     def setUp(self):
         super(TestConfigPathCheck, self).setUp()
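
The rewritten run-command tests above build their include/exclude list files through a small mkstemp helper and exercise both the new flags and their deprecated spellings. A standalone sketch of that pattern; the command lines are only assembled here for illustration, not executed:

import os
import tempfile


def make_test_list_file(content):
    # Same idea as _get_test_list_file(): write the test list to a
    # temporary file and hand its path to the CLI flag.
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        f.write(content)
    return path


path = make_test_list_file('passing\n')
new_style = ['tempest', 'run', '--include-list=%s' % path]
old_style = ['tempest', 'run', '--whitelist-file=%s' % path]  # deprecated
print(new_style)
print(old_style)
os.remove(path)
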
diff --git a/tempest/tests/cmd/test_tempest_init.py b/tempest/tests/cmd/test_tempest_init.py
index 9042b12..fce0882 100644
--- a/tempest/tests/cmd/test_tempest_init.py
+++ b/tempest/tests/cmd/test_tempest_init.py
@@ -40,7 +40,7 @@
 
     def test_generate_sample_config(self):
         local_dir = self.useFixture(fixtures.TempDir())
-        etc_dir_path = os.path.join(local_dir.path, 'etc/')
+        etc_dir_path = os.path.join(local_dir.path, 'etc')
         os.mkdir(etc_dir_path)
         init_cmd = init.TempestInit(None, None)
         local_sample_conf_file = os.path.join(etc_dir_path,
@@ -56,7 +56,7 @@
 
     def test_update_local_conf(self):
         local_dir = self.useFixture(fixtures.TempDir())
-        etc_dir_path = os.path.join(local_dir.path, 'etc/')
+        etc_dir_path = os.path.join(local_dir.path, 'etc')
         os.mkdir(etc_dir_path)
         lock_dir = os.path.join(local_dir.path, 'tempest_lock')
         config_path = os.path.join(etc_dir_path, 'tempest.conf')
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 721fd76..a8a4c0f 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -18,12 +18,9 @@
 import fixtures
 from oslo_serialization import jsonutils as json
 
-from tempest import clients
 from tempest.cmd import init
 from tempest.cmd import verify_tempest_config
-from tempest.common import credentials_factory
 from tempest import config
-from tempest.lib.common import rest_client
 from tempest.lib.common.utils import data_utils
 from tempest.lib import exceptions as lib_exc
 from tempest.tests import base
@@ -97,15 +94,15 @@
         self.useFixture(fixtures.MockPatchObject(
             verify_tempest_config, '_get_unversioned_endpoint',
             return_value='http://fake_endpoint:5000'))
-        fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
+        fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
         fake_resp = json.dumps(fake_resp)
         self.useFixture(fixtures.MockPatch(
             'tempest.lib.common.http.ClosingHttp.request',
             return_value=(None, fake_resp)))
         fake_os = mock.MagicMock()
         versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
-        self.assertIn('v1.0', versions)
         self.assertIn('v2.0', versions)
+        self.assertIn('v3.0', versions)
 
     def test_get_nova_versions(self):
         self.useFixture(fixtures.MockPatchObject(
@@ -145,7 +142,7 @@
         self.assertTrue(mock_log_error.called)
 
     def test_verify_api_versions(self):
-        api_services = ['cinder', 'glance', 'keystone']
+        api_services = ['glance', 'keystone']
         fake_os = mock.MagicMock()
         for svc in api_services:
             m = 'verify_%s_api_versions' % svc
@@ -154,7 +151,7 @@
                 verify_mock.assert_called_once_with(fake_os, True)
 
     def test_verify_api_versions_not_implemented(self):
-        api_services = ['cinder', 'glance', 'keystone']
+        api_services = ['glance', 'keystone']
         fake_os = mock.MagicMock()
         for svc in api_services:
             m = 'verify_%s_api_versions' % svc
@@ -178,52 +175,6 @@
                                            'identity-feature-enabled',
                                            False, True)
 
-    @mock.patch('tempest.lib.common.http.ClosingHttp.request')
-    def test_verify_cinder_api_versions_no_v3(self, mock_request):
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, '_get_unversioned_endpoint',
-            return_value='http://fake_endpoint:5000'))
-        fake_resp = {'versions': [{'id': 'v2.0'}]}
-        fake_resp = json.dumps(fake_resp)
-        mock_request.return_value = (None, fake_resp)
-        fake_os = mock.MagicMock()
-        with mock.patch.object(verify_tempest_config,
-                               'print_and_or_update') as print_mock:
-            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
-        print_mock.assert_any_call('api_v3', 'volume-feature-enabled',
-                                   False, True)
-        self.assertEqual(1, print_mock.call_count)
-
-    @mock.patch('tempest.lib.common.http.ClosingHttp.request')
-    def test_verify_cinder_api_versions_no_v2(self, mock_request):
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, '_get_unversioned_endpoint',
-            return_value='http://fake_endpoint:5000'))
-        fake_resp = {'versions': [{'id': 'v3.0'}]}
-        fake_resp = json.dumps(fake_resp)
-        mock_request.return_value = (None, fake_resp)
-        fake_os = mock.MagicMock()
-        with mock.patch.object(verify_tempest_config,
-                               'print_and_or_update') as print_mock:
-            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
-        print_mock.assert_any_call('api_v2', 'volume-feature-enabled',
-                                   False, True)
-        self.assertEqual(1, print_mock.call_count)
-
-    @mock.patch('tempest.lib.common.http.ClosingHttp.request')
-    def test_verify_cinder_api_versions_no_v1(self, mock_request):
-        self.useFixture(fixtures.MockPatchObject(
-            verify_tempest_config, '_get_unversioned_endpoint',
-            return_value='http://fake_endpoint:5000'))
-        fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
-        fake_resp = json.dumps(fake_resp)
-        mock_request.return_value = (None, fake_resp)
-        fake_os = mock.MagicMock()
-        with mock.patch.object(verify_tempest_config,
-                               'print_and_or_update') as print_mock:
-            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
-        print_mock.assert_not_called()
-
     def test_verify_glance_version_no_v2_with_v1_1(self):
         # This test verifies that wrong config api_v2 = True is detected
         class FakeClient(object):
@@ -560,26 +511,27 @@
         self.assertEqual([], results['swift']['extensions'])
 
     def test_get_extension_client(self):
-        creds = credentials_factory.get_credentials(
-            fill_in=False, username='fake_user', project_name='fake_project',
-            password='fake_password')
-        os = clients.Manager(creds)
-        for service in ['nova', 'neutron', 'swift', 'cinder']:
+        fake_os = mock.MagicMock()
+        services = {
+            'nova': fake_os.compute.ExtensionsClient(),
+            'neutron': fake_os.network.ExtensionsClient(),
+            'swift': fake_os.object_storage.CapabilitiesClient(),
+            'cinder': fake_os.volume_v2.ExtensionsClient(),
+        }
+        for service in services.keys():
             extensions_client = verify_tempest_config.get_extension_client(
-                os, service)
-            self.assertIsInstance(extensions_client, rest_client.RestClient)
+                fake_os, service)
+            self.assertIsInstance(extensions_client, mock.MagicMock)
+            self.assertEqual(extensions_client, services[service])
 
     def test_get_extension_client_sysexit(self):
-        creds = credentials_factory.get_credentials(
-            fill_in=False, username='fake_user', project_name='fake_project',
-            password='fake_password')
-        os = clients.Manager(creds)
+        fake_os = mock.MagicMock()
         self.assertRaises(SystemExit,
                           verify_tempest_config.get_extension_client,
-                          os, 'fakeservice')
+                          fake_os, 'fakeservice')
 
     def test_get_config_file(self):
-        conf_dir = os.path.join(os.getcwd(), 'etc/')
+        conf_dir = os.path.join(os.getcwd(), 'etc')
         conf_file = "tempest.conf.sample"
         local_sample_conf_file = os.path.join(conf_dir, conf_file)
 
diff --git a/tempest/tests/cmd/test_workspace.py b/tempest/tests/cmd/test_workspace.py
index eae6202..f16d533 100644
--- a/tempest/tests/cmd/test_workspace.py
+++ b/tempest/tests/cmd/test_workspace.py
@@ -12,15 +12,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from io import StringIO
 import os
 import shutil
 import subprocess
 import tempfile
 from unittest.mock import patch
-try:
-    from StringIO import StringIO
-except ImportError:
-    from io import StringIO
 
 from tempest.cmd import workspace
 from tempest.lib.common.utils import data_utils
diff --git a/tempest/tests/common/test_compute.py b/tempest/tests/common/test_compute.py
index 45a439c..142bb08 100644
--- a/tempest/tests/common/test_compute.py
+++ b/tempest/tests/common/test_compute.py
@@ -15,7 +15,7 @@
 
 from unittest import mock
 
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
 
 
 from tempest.common import compute
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
index 0ef3742..374474d 100644
--- a/tempest/tests/common/test_credentials_factory.py
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -173,10 +173,15 @@
     @mock.patch.object(cf, 'get_credentials')
     def test_get_configured_admin_credentials(self, mock_get_credentials):
         cfg.CONF.set_default('auth_version', 'v3', 'identity')
-        all_params = [('admin_username', 'username', 'my_name'),
-                      ('admin_password', 'password', 'secret'),
-                      ('admin_project_name', 'project_name', 'my_pname'),
-                      ('admin_domain_name', 'domain_name', 'my_dname')]
+        all_params = [
+            ('admin_username', 'username', 'my_name'),
+            ('admin_user_domain_name', 'user_domain_name', 'my_dname'),
+            ('admin_password', 'password', 'secret'),
+            ('admin_project_name', 'project_name', 'my_pname'),
+            ('admin_project_domain_name', 'project_domain_name', 'my_dname'),
+            ('admin_domain_name', 'domain_name', 'my_dname'),
+            ('admin_system', 'system', None),
+        ]
         expected_result = 'my_admin_credentials'
         mock_get_credentials.return_value = expected_result
         for config_item, _, value in all_params:
@@ -194,10 +199,15 @@
     def test_get_configured_admin_credentials_not_fill_valid(
             self, mock_get_credentials):
         cfg.CONF.set_default('auth_version', 'v2', 'identity')
-        all_params = [('admin_username', 'username', 'my_name'),
-                      ('admin_password', 'password', 'secret'),
-                      ('admin_project_name', 'project_name', 'my_pname'),
-                      ('admin_domain_name', 'domain_name', 'my_dname')]
+        all_params = [
+            ('admin_username', 'username', 'my_name'),
+            ('admin_user_domain_name', 'user_domain_name', 'my_dname'),
+            ('admin_password', 'password', 'secret'),
+            ('admin_project_domain_name', 'project_domain_name', 'my_dname'),
+            ('admin_project_name', 'project_name', 'my_pname'),
+            ('admin_domain_name', 'domain_name', 'my_dname'),
+            ('admin_system', 'system', None),
+        ]
         expected_result = mock.Mock()
         expected_result.is_valid.return_value = True
         mock_get_credentials.return_value = expected_result
@@ -278,3 +288,20 @@
         mock_auth_get_credentials.assert_called_once_with(
             expected_uri, fill_in=False, identity_version='v3',
             **expected_params)
+
+    @mock.patch('tempest.lib.auth.get_credentials')
+    def test_get_credentials_v3_system(self, mock_auth_get_credentials):
+        expected_uri = 'V3_URI'
+        expected_result = 'my_creds'
+        mock_auth_get_credentials.return_value = expected_result
+        cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
+        cfg.CONF.set_default('admin_system', 'all', 'auth')
+        params = {'system': 'all'}
+        expected_params = params.copy()
+        expected_params.update(config.service_client_config())
+        result = cf.get_credentials(fill_in=False, identity_version='v3',
+                                    **params)
+        self.assertEqual(expected_result, result)
+        mock_auth_get_credentials.assert_called_once_with(
+            expected_uri, fill_in=False, identity_version='v3',
+            **expected_params)
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 5f8b990..f801243 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -20,6 +20,7 @@
 from tempest.common import waiters
 from tempest import exceptions
 from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.compute import servers_client
 from tempest.lib.services.volume.v2 import volumes_client
 from tempest.tests import base
 import tempest.tests.utils as utils
@@ -55,6 +56,93 @@
                           waiters.wait_for_image_status,
                           self.client, 'fake_image_id', 'active')
 
+    def test_wait_for_image_imported_to_stores(self):
+        self.client.show_image.return_value = ({'status': 'active',
+                                                'stores': 'fake_store'})
+        start_time = int(time.time())
+        waiters.wait_for_image_imported_to_stores(
+            self.client, 'fake_image_id', 'fake_store')
+        end_time = int(time.time())
+        # Ensure waiter returns before build_timeout
+        self.assertLess((end_time - start_time), 10)
+
+    def test_wait_for_image_imported_to_stores_failure(self):
+        time_mock = self.patch('time.time')
+        client = mock.MagicMock()
+        client.build_timeout = 2
+        self.patch('time.time', side_effect=[0., 1., 2.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        client.show_image.return_value = ({
+            'status': 'saving',
+            'stores': 'fake_store',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        self.assertRaises(lib_exc.OtherRestClientException,
+                          waiters.wait_for_image_imported_to_stores,
+                          client, 'fake_image_id', 'fake_store')
+
+    def test_wait_for_image_imported_to_stores_timeout(self):
+        time_mock = self.patch('time.time')
+        client = mock.MagicMock()
+        client.build_timeout = 2
+        self.patch('time.time', side_effect=[0., 1., 2.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        client.show_image.return_value = ({
+            'status': 'saving',
+            'stores': 'fake_store'})
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_image_imported_to_stores,
+                          client, 'fake_image_id', 'fake_store')
+
+    def test_wait_for_image_copied_to_stores(self):
+        self.client.show_image.return_value = ({
+            'status': 'active',
+            'os_glance_importing_to_stores': '',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        start_time = int(time.time())
+        waiters.wait_for_image_copied_to_stores(
+            self.client, 'fake_image_id')
+        end_time = int(time.time())
+        # Ensure waiter returns before build_timeout
+        self.assertLess((end_time - start_time), 10)
+
+    def test_wait_for_image_copied_to_stores_timeout(self):
+        time_mock = self.patch('time.time')
+        self.patch('time.time', side_effect=[0., 1.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        self.client.show_image.return_value = ({
+            'status': 'active',
+            'os_glance_importing_to_stores': 'processing',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_image_copied_to_stores,
+                          self.client, 'fake_image_id')
+
+    def test_wait_for_image_tasks_status(self):
+        self.client.show_image_tasks.return_value = ({
+            'tasks': [{'status': 'success'}]})
+        start_time = int(time.time())
+        waiters.wait_for_image_tasks_status(
+            self.client, 'fake_image_id', 'success')
+        end_time = int(time.time())
+        # Ensure waiter returns before build_timeout
+        self.assertLess((end_time - start_time), 10)
+
+    def test_wait_for_image_tasks_status_timeout(self):
+        time_mock = self.patch('time.time')
+        self.patch('time.time', side_effect=[0., 1.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        self.client.show_image_tasks.return_value = ({
+            'tasks': [
+                {'status': 'success'},
+                {'status': 'processing'}]})
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_image_tasks_status,
+                          self.client, 'fake_image_id', 'success')
+
 
 class TestInterfaceWaiters(base.TestCase):
 
@@ -131,6 +219,36 @@
                                           mock.call('server_id')])
         sleep.assert_called_once_with(client.build_interval)
 
+    def test_wait_for_guest_os_boot(self):
+        get_console_output = mock.Mock(
+            side_effect=[
+                {'output': 'os not ready yet\n'},
+                {'output': 'login:\n'}
+            ])
+        client = self.mock_client(get_console_output=get_console_output)
+        self.patch('time.time', return_value=0.)
+        sleep = self.patch('time.sleep')
+
+        with mock.patch.object(waiters.LOG, "info") as log_info:
+            waiters.wait_for_guest_os_boot(client, 'server_id')
+
+        get_console_output.assert_has_calls([
+            mock.call('server_id'), mock.call('server_id')])
+        sleep.assert_called_once_with(client.build_interval)
+        log_info.assert_not_called()
+
+    def test_wait_for_guest_os_boot_timeout(self):
+        get_console_output = mock.Mock(
+            return_value={'output': 'os not ready yet\n'})
+        client = self.mock_client(get_console_output=get_console_output)
+        self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+        self.patch('time.sleep')
+
+        with mock.patch.object(waiters.LOG, "info") as log_info:
+            waiters.wait_for_guest_os_boot(client, 'server_id')
+
+        log_info.assert_called_once()
+
 
 class TestVolumeWaiters(base.TestCase):
     vol_migrating_src_host = {
@@ -234,6 +352,29 @@
                                     mock.call(volume_id)])
         mock_sleep.assert_called_once_with(1)
 
+    def test_wait_for_volume_attachment_create(self):
+        vol_detached = {'volume': {'attachments': []}}
+        vol_attached = {'volume': {'attachments': [
+                       {'id': uuids.volume_id,
+                        'attachment_id': uuids.attachment_id,
+                        'server_id': uuids.server_id,
+                        'volume_id': uuids.volume_id}]}}
+        show_volume = mock.MagicMock(side_effect=[
+            vol_detached, vol_detached, vol_attached])
+        client = mock.Mock(spec=volumes_client.VolumesClient,
+                           build_interval=1,
+                           build_timeout=5,
+                           show_volume=show_volume)
+        self.patch('time.time')
+        self.patch('time.sleep')
+        att = waiters.wait_for_volume_attachment_create(
+            client, uuids.volume_id, uuids.server_id)
+        assert att == vol_attached['volume']['attachments'][0]
+        # Assert that show volume is called until the attachment appears.
+        show_volume.assert_has_calls([mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id)])
+
     def test_wait_for_volume_attachment(self):
         vol_detached = {'volume': {'attachments': []}}
         vol_attached = {'volume': {'attachments': [
@@ -249,9 +390,9 @@
         waiters.wait_for_volume_attachment_remove(client, uuids.volume_id,
                                                   uuids.attachment_id)
         # Assert that show volume is called until the attachment is removed.
-        show_volume.assert_has_calls = [mock.call(uuids.volume_id),
-                                        mock.call(uuids.volume_id),
-                                        mock.call(uuids.volume_id)]
+        show_volume.assert_has_calls([mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id),
+                                      mock.call(uuids.volume_id)])
 
     def test_wait_for_volume_attachment_timeout(self):
         show_volume = mock.MagicMock(return_value={
@@ -281,3 +422,54 @@
                                                   uuids.attachment_id)
         # Assert that show volume is only called once before we return
         show_volume.assert_called_once_with(uuids.volume_id)
+
+    def test_wait_for_volume_attachment_remove_from_server(self):
+        volume_attached = {
+            "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+        volume_not_attached = {"volumeAttachments": []}
+        mock_list_volume_attachments = mock.Mock(
+            side_effect=[volume_attached, volume_not_attached])
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_interval=1,
+            build_timeout=1,
+            list_volume_attachments=mock_list_volume_attachments)
+        self.patch(
+            'time.time',
+            side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+        self.patch('time.sleep')
+
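+        # The patched clock keeps the first check inside build_timeout, so the
+        # waiter retries and sees the attachment gone on the second listing.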
+        waiters.wait_for_volume_attachment_remove_from_server(
+            mock_client, uuids.server_id, uuids.volume_id)
+
+        # Assert that list_volume_attachments is called until the attachment is
+        # removed.
+        mock_list_volume_attachments.assert_has_calls([
+            mock.call(uuids.server_id),
+            mock.call(uuids.server_id)])
+
+    def test_wait_for_volume_attachment_remove_from_server_timeout(self):
+        volume_attached = {
+            "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+        mock_list_volume_attachments = mock.Mock(
+            side_effect=[volume_attached, volume_attached])
+        mock_client = mock.Mock(
+            spec=servers_client.ServersClient,
+            build_interval=1,
+            build_timeout=1,
+            list_volume_attachments=mock_list_volume_attachments)
+        self.patch(
+            'time.time',
+            side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+        self.patch('time.sleep')
+
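+        # Here the attachment never disappears and the clock jumps past
+        # build_timeout, so the waiter is expected to raise TimeoutException.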
+        self.assertRaises(
+            lib_exc.TimeoutException,
+            waiters.wait_for_volume_attachment_remove_from_server,
+            mock_client, uuids.server_id, uuids.volume_id)
+
+        # Assert that list_volume_attachments is called until the timeout is
+        # reached.
+        mock_list_volume_attachments.assert_has_calls([
+            mock.call(uuids.server_id),
+            mock.call(uuids.server_id)])
diff --git a/tempest/tests/lib/cmd/test_check_uuid.py b/tempest/tests/lib/cmd/test_check_uuid.py
index 28ebca1..5d63dec 100644
--- a/tempest/tests/lib/cmd/test_check_uuid.py
+++ b/tempest/tests/lib/cmd/test_check_uuid.py
@@ -10,15 +10,16 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import ast
 import importlib
 import os
+import shutil
 import sys
 import tempfile
 from unittest import mock
 
-import fixtures
-
 from tempest.lib.cmd import check_uuid
+from tempest.lib import decorators
 from tempest.tests import base
 
 
@@ -39,23 +40,24 @@
         return tests_file
 
     def test_fix_argument_no(self):
-        temp_dir = self.useFixture(fixtures.TempDir(rootdir="."))
-        tests_file = self.create_tests_file(temp_dir.path)
-
+        temp_dir = tempfile.mkdtemp(prefix='check-uuid-no', dir=".")
+        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
+        tests_file = self.create_tests_file(temp_dir)
         sys.argv = [sys.argv[0]] + ["--package",
-                                    os.path.relpath(temp_dir.path)]
+                                    os.path.relpath(temp_dir)]
 
         self.assertRaises(SystemExit, check_uuid.run)
         with open(tests_file, "r") as f:
             self.assertTrue(TestCLInterface.CODE == f.read())
 
+    @decorators.skip_because(bug='1918316')
     def test_fix_argument_yes(self):
-        temp_dir = self.useFixture(fixtures.TempDir(rootdir="."))
-        tests_file = self.create_tests_file(temp_dir.path)
+        temp_dir = tempfile.mkdtemp(prefix='check-uuid-yes', dir=".")
+        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
+        tests_file = self.create_tests_file(temp_dir)
 
         sys.argv = [sys.argv[0]] + ["--fix", "--package",
-                                    os.path.relpath(temp_dir.path)]
-
+                                    os.path.relpath(temp_dir)]
         check_uuid.run()
         with open(tests_file, "r") as f:
             self.assertTrue(TestCLInterface.CODE != f.read())
@@ -95,6 +97,8 @@
 
 
 class TestTestChecker(base.TestCase):
+    IMPORT_LINE = "from tempest.lib import decorators\n"
+
     def _test_add_uuid_to_test(self, source_file):
         class Fake_test_node():
             lineno = 1
@@ -127,55 +131,69 @@
                        "        pass")
         self._test_add_uuid_to_test(source_file)
 
+    @staticmethod
+    def get_mocked_ast_object(lineno, col_offset, module, name, object_type):
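+        # Build a minimal stand-in for an ast import node carrying only the
+        # attributes check_uuid inspects: lineno, col_offset, module, names.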
+        ast_object = mock.Mock(spec=object_type)
+        name_obj = mock.Mock()
+        ast_object.lineno = lineno
+        ast_object.col_offset = col_offset
+        name_obj.name = name
+        ast_object.module = module
+        ast_object.names = [name_obj]
+
+        return ast_object
+
     def test_add_import_for_test_uuid_no_tempest(self):
         patcher = check_uuid.SourcePatcher()
         checker = check_uuid.TestChecker(importlib.import_module('tempest'))
-        fake_file = tempfile.NamedTemporaryFile("w+t")
+        fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+        source_code = "from unittest import mock\n"
+        fake_file.write(source_code)
+        fake_file.close()
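+        # The fake module imports only unittest, so the checker is expected to
+        # append the tempest decorators import after that existing import.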
 
         class Fake_src_parsed():
-            body = ['test_node']
-        checker._import_name = mock.Mock(return_value='fake_module')
+            body = [TestTestChecker.get_mocked_ast_object(
+                1, 4, 'unittest', 'mock', ast.ImportFrom)]
 
-        checker._add_import_for_test_uuid(patcher, Fake_src_parsed(),
+        checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
                                           fake_file.name)
-        (patch_id, patch), = patcher.patches.items()
-        self.assertEqual(patcher._quote('\n' + check_uuid.IMPORT_LINE + '\n'),
-                         patch)
-        self.assertEqual('{%s:s}' % patch_id,
-                         patcher.source_files[fake_file.name])
+        patcher.apply_patches()
+
+        with open(fake_file.name, "r") as f:
+            expected_result = source_code + '\n' + TestTestChecker.IMPORT_LINE
+            self.assertTrue(expected_result == f.read())
 
     def test_add_import_for_test_uuid_tempest(self):
         patcher = check_uuid.SourcePatcher()
         checker = check_uuid.TestChecker(importlib.import_module('tempest'))
         fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
-        test1 = ("    def test_test():\n"
-                 "        pass\n")
-        test2 = ("    def test_another_test():\n"
-                 "        pass\n")
-        source_code = test1 + test2
+        source_code = "from tempest import a_fake_module\n"
         fake_file.write(source_code)
         fake_file.close()
 
-        def fake_import_name(node):
-            return node.name
-        checker._import_name = fake_import_name
+        class Fake_src_parsed:
+            body = [TestTestChecker.get_mocked_ast_object(
+                1, 4, 'tempest', 'a_fake_module', ast.ImportFrom)]
 
-        class Fake_node():
-            def __init__(self, lineno, col_offset, name):
-                self.lineno = lineno
-                self.col_offset = col_offset
-                self.name = name
-
-        class Fake_src_parsed():
-            body = [Fake_node(1, 4, 'tempest.a_fake_module'),
-                    Fake_node(3, 4, 'another_fake_module')]
-
-        checker._add_import_for_test_uuid(patcher, Fake_src_parsed(),
+        checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
                                           fake_file.name)
-        (patch_id, patch), = patcher.patches.items()
-        self.assertEqual(patcher._quote(check_uuid.IMPORT_LINE + '\n'),
-                         patch)
-        expected_source = patcher._quote(test1) + '{' + patch_id + ':s}' +\
-            patcher._quote(test2)
-        self.assertEqual(expected_source,
-                         patcher.source_files[fake_file.name])
+        patcher.apply_patches()
+
+        with open(fake_file.name, "r") as f:
+            expected_result = source_code + TestTestChecker.IMPORT_LINE
+            self.assertTrue(expected_result == f.read())
+
+    def test_add_import_no_import(self):
+        patcher = check_uuid.SourcePatcher()
+        patcher.add_patch = mock.Mock()
+        checker = check_uuid.TestChecker(importlib.import_module('tempest'))
+        fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+        fake_file.close()
+
+        class Fake_src_parsed:
+            body = []
+
+        checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
+                                          fake_file.name)
+
+        patcher.add_patch.assert_not_called()
diff --git a/tempest/tests/lib/common/test_api_version_utils.py b/tempest/tests/lib/common/test_api_version_utils.py
index b99e8d4..8d5de09 100644
--- a/tempest/tests/lib/common/test_api_version_utils.py
+++ b/tempest/tests/lib/common/test_api_version_utils.py
@@ -12,7 +12,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
 import testtools
 
 from tempest.lib.common import api_version_utils
@@ -31,7 +30,7 @@
                                                            cfg_max_version)
         except testtools.TestCase.skipException as e:
             if not expected_skip:
-                raise testtools.TestCase.failureException(six.text_type(e))
+                raise testtools.TestCase.failureException(str(e))
 
     def test_version_min_in_range(self):
         self._test_version('2.2', '2.10', '2.1', '2.7')
diff --git a/tempest/tests/lib/common/test_cred_client.py b/tempest/tests/lib/common/test_cred_client.py
index 860a465..7ea660b 100644
--- a/tempest/tests/lib/common/test_cred_client.py
+++ b/tempest/tests/lib/common/test_cred_client.py
@@ -43,6 +43,14 @@
         self.projects_client.delete_tenant.assert_called_once_with(
             'fake_id')
 
+    def test_get_credentials(self):
+        ret = self.creds_client.get_credentials(
+            {'name': 'some_user', 'id': 'fake_id'},
+            {'name': 'some_project', 'id': 'fake_id'},
+            'password123')
+        self.assertEqual(ret.username, 'some_user')
+        self.assertEqual(ret.project_name, 'some_project')
+
 
 class TestCredClientV3(base.TestCase):
     def setUp(self):
@@ -53,7 +61,7 @@
         self.roles_client = mock.MagicMock()
         self.domains_client = mock.MagicMock()
         self.domains_client.list_domains.return_value = {
-            'domains': [{'id': 'fake_domain_id'}]
+            'domains': [{'id': 'fake_domain_id', 'name': 'some_domain'}]
         }
         self.creds_client = cred_client.V3CredsClient(self.identity_client,
                                                       self.projects_client,
@@ -75,3 +83,49 @@
         self.creds_client.delete_project('fake_id')
         self.projects_client.delete_project.assert_called_once_with(
             'fake_id')
+
+    def test_get_credentials(self):
+        ret = self.creds_client.get_credentials(
+            {'name': 'some_user', 'id': 'fake_id'},
+            {'name': 'some_project', 'id': 'fake_id'},
+            'password123')
+        self.assertEqual(ret.username, 'some_user')
+        self.assertEqual(ret.project_name, 'some_project')
+        self.assertIsNone(ret.system)
+        self.assertEqual(ret.domain_name, 'some_domain')
+        ret = self.creds_client.get_credentials(
+            {'name': 'some_user', 'id': 'fake_id'},
+            None,
+            'password123',
+            domain={'name': 'another_domain', 'id': 'another_id'})
+        self.assertEqual(ret.username, 'some_user')
+        self.assertIsNone(ret.project_name)
+        self.assertIsNone(ret.system)
+        self.assertEqual(ret.domain_name, 'another_domain')
+        ret = self.creds_client.get_credentials(
+            {'name': 'some_user', 'id': 'fake_id'},
+            None,
+            'password123',
+            system={'system': 'all'})
+        self.assertEqual(ret.username, 'some_user')
+        self.assertIsNone(ret.project_name)
+        self.assertEqual(ret.system, {'system': 'all'})
+        self.assertEqual(ret.domain_name, 'some_domain')
+
+    def test_create_user(self):
+        self.users_client.create_user.return_value = {
+            'user': 'a_user'
+        }
+        fake_project = {
+            'id': 'fake_project_id',
+        }
+        res = self.creds_client.create_user('fake_username',
+                                            'fake_password',
+                                            fake_project,
+                                            'fake_email')
+        self.assertEqual('a_user', res)
+        self.users_client.create_user.assert_called_once_with(
+            name='fake_username', password='fake_password',
+            project_id=fake_project['id'],
+            email='fake_email',
+            domain_id='fake_domain_id')
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index e9073cc..b4b1b91 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -214,6 +214,56 @@
         self.assertEqual(admin_creds.user_id, '1234')
 
     @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_project_alt_admin_creds(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_roles('1234', 'admin')
+        self._mock_user_create('1234', 'fake_alt_admin_user')
+        self._mock_tenant_create('1234', 'fake_alt_admin')
+
+        user_mock = mock.patch.object(self.roles_client.RolesClient,
+                                      'create_user_role_on_project')
+        user_mock.start()
+        self.addCleanup(user_mock.stop)
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock:
+            alt_admin_creds = creds.get_project_alt_admin_creds()
+        user_mock.assert_has_calls([
+            mock.call('1234', '1234', '1234')])
+        self.assertEqual(alt_admin_creds.username, 'fake_alt_admin_user')
+        self.assertEqual(alt_admin_creds.project_name, 'fake_alt_admin')
+        # Verify IDs
+        self.assertEqual(alt_admin_creds.project_id, '1234')
+        self.assertEqual(alt_admin_creds.user_id, '1234')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_project_alt_member_creds(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_assign_user_role()
+        self._mock_list_role()
+        self._mock_tenant_create('1234', 'fake_alt_member')
+        self._mock_user_create('1234', 'fake_alt_user')
+        alt_member_creds = creds.get_project_alt_member_creds()
+        self.assertEqual(alt_member_creds.username, 'fake_alt_user')
+        self.assertEqual(alt_member_creds.project_name, 'fake_alt_member')
+        # Verify IDs
+        self.assertEqual(alt_member_creds.project_id, '1234')
+        self.assertEqual(alt_member_creds.user_id, '1234')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_project_alt_reader_creds(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_assign_user_role()
+        self._mock_list_roles('1234', 'reader')
+        self._mock_tenant_create('1234', 'fake_alt_reader')
+        self._mock_user_create('1234', 'fake_alt_user')
+        alt_reader_creds = creds.get_project_alt_reader_creds()
+        self.assertEqual(alt_reader_creds.username, 'fake_alt_user')
+        self.assertEqual(alt_reader_creds.project_name, 'fake_alt_reader')
+        # Verify IDs
+        self.assertEqual(alt_reader_creds.project_id, '1234')
+        self.assertEqual(alt_reader_creds.user_id, '1234')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
     def test_role_creds(self, MockRestClient):
         creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
         self._mock_list_2_roles()
@@ -242,6 +292,100 @@
         self.assertEqual(role_creds.user_id, '1234')
 
     @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_role_creds_with_project_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        self._mock_tenant_create('1234', 'fake_role_project')
+
+        user_mock = mock.patch.object(self.roles_client.RolesClient,
+                                      'create_user_role_on_project')
+        user_mock.start()
+        self.addCleanup(user_mock.stop)
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='project')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        args = map(lambda x: x[1], calls)
+        args = list(args)
+        self.assertIn(('1234', '1234', '1234'), args)
+        self.assertIn(('1234', '1234', '12345'), args)
+        self.assertEqual(role_creds.username, 'fake_role_user')
+        self.assertEqual(role_creds.project_name, 'fake_role_project')
+        # Verify IDs
+        self.assertEqual(role_creds.project_id, '1234')
+        self.assertEqual(role_creds.user_id, '1234')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def _test_get_same_role_creds_with_project_scope(self, MockRestClient,
+                                                     scope=None):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        self._mock_tenant_create('1234', 'fake_role_project')
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope=scope)
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+
+        # Fetch the same creds again
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope=scope)
+        calls = user_mock1.mock_calls
+        # Assert that previously created creds are returned and no call to
+        # role creation is made.
+        self.assertEqual(len(calls), 0)
+        # Check if previously created creds are returned.
+        self.assertEqual(role_creds, role_creds_new)
+
+    def test_get_same_role_creds_with_project_scope(self):
+        self._test_get_same_role_creds_with_project_scope(scope='project')
+
+    def test_get_same_role_creds_with_default_scope(self):
+        self._test_get_same_role_creds_with_project_scope()
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def _test_get_different_role_creds_with_project_scope(
+            self, MockRestClient, scope=None):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        self._mock_tenant_create('1234', 'fake_role_project')
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope=scope)
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        # Fetch the creds with one role different
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1'], scope=scope)
+        calls = user_mock1.mock_calls
+        # Because one role is different, assert that the role creation
+        # is called once with the single specified role.
+        self.assertEqual(len(calls), 1)
+        # Check that new creds are created for the new roles.
+        self.assertNotEqual(role_creds, role_creds_new)
+
+    def test_get_different_role_creds_with_project_scope(self):
+        self._test_get_different_role_creds_with_project_scope(
+            scope='project')
+
+    def test_get_different_role_creds_with_default_scope(self):
+        self._test_get_different_role_creds_with_project_scope()
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
     def test_all_cred_cleanup(self, MockRestClient):
         creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
         self._mock_assign_user_role()
@@ -658,6 +802,232 @@
         return project_fix
 
     @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_role_creds_with_system_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
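+        # System-scoped creds are not tied to a project, so no project is
+        # mocked here; roles are assigned on the system instead.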
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        args = map(lambda x: x[1], calls)
+        args = list(args)
+        self.assertIn(('1234', '1234'), args)
+        self.assertIn(('1234', '12345'), args)
+        self.assertEqual(role_creds.username, 'fake_role_user')
+        self.assertEqual(role_creds.user_id, '1234')
+        # Verify system scope
+        self.assertEqual(role_creds.system, 'all')
+        # Verify domain is default
+        self.assertEqual(role_creds.domain_id, 'default')
+        self.assertEqual(role_creds.domain_name, 'Default')
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_same_role_creds_with_system_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+
+        # Fetch the same creds again
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock1.mock_calls
+        # Assert that previously created creds are returned and no call to
+        # role creation is made.
+        self.assertEqual(len(calls), 0)
+        # Verify system scope
+        self.assertEqual(role_creds_new.system, 'all')
+        # Check if previously created creds are returned.
+        self.assertEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_different_role_creds_with_system_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        # Verify system scope
+        self.assertEqual(role_creds.system, 'all')
+        # Fetch the creds with one role different
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1'], scope='system')
+        calls = user_mock1.mock_calls
+        # Because one role is different, assert that the role creation
+        # is called once with the single specified role.
+        self.assertEqual(len(calls), 1)
+        # Verify Scope
+        self.assertEqual(role_creds_new.system, 'all')
+        # Check that new creds are created for the new roles.
+        self.assertNotEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_role_creds_with_domain_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        domain = {
+            "id": '12',
+            "enabled": True,
+            "name": "TestDomain"
+        }
+
+        self.useFixture(fixtures.MockPatch(
+            'tempest.lib.common.cred_client.V3CredsClient.create_domain',
+            return_value=domain))
+
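+        # create_domain is stubbed to return the fake domain above, so the
+        # roles should be assigned on that domain rather than on a project.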
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='domain')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        args = map(lambda x: x[1], calls)
+        args = list(args)
+        self.assertIn((domain['id'], '1234', '1234'), args)
+        self.assertIn((domain['id'], '1234', '12345'), args)
+        self.assertEqual(role_creds.username, 'fake_role_user')
+        self.assertEqual(role_creds.user_id, '1234')
+        # Verify creds are under new created domain
+        self.assertEqual(role_creds.domain_id, domain['id'])
+        self.assertEqual(role_creds.domain_name, domain['name'])
+        # Verify that Scope is None
+        self.assertIsNone(role_creds.system)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_same_role_creds_with_domain_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        domain = {
+            "id": '12',
+            "enabled": True,
+            "name": "TestDomain"
+        }
+
+        self.useFixture(fixtures.MockPatch(
+            'tempest.lib.common.cred_client.V3CredsClient.create_domain',
+            return_value=domain))
+
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='domain')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        self.assertEqual(role_creds.user_id, '1234')
+        # Verify Scope
+        self.assertIsNone(role_creds.system)
+        # Fetch the same creds again
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='domain')
+        calls = user_mock1.mock_calls
+        # Assert that previously created creds are returned and no call to
+        # role creation is made.
+        self.assertEqual(len(calls), 0)
+        # Verify Scope
+        self.assertIsNone(role_creds_new.system)
+        # Check if previously created creds are returned.
+        self.assertEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_different_role_creds_with_domain_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+
+        domain = {
+            "id": '12',
+            "enabled": True,
+            "name": "TestDomain"
+        }
+
+        self.useFixture(fixtures.MockPatch(
+            'tempest.lib.common.cred_client.V3CredsClient.create_domain',
+            return_value=domain))
+
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='domain')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        self.assertEqual(role_creds.user_id, '1234')
+        # Verify Scope
+        self.assertIsNone(role_creds.system)
+        # Fetch the creds with one role different
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_domain') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1'], scope='domain')
+        calls = user_mock1.mock_calls
+        # Because one role is different, assert that the role creation
+        # is called once with the single specified role.
+        self.assertEqual(len(calls), 1)
+        # Verify Scope
+        self.assertIsNone(role_creds_new.system)
+        # Check that new creds are created for the new roles.
+        self.assertNotEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
+    def test_get_role_creds_with_different_scope(self, MockRestClient):
+        creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
+        self._mock_list_2_roles()
+        self._mock_user_create('1234', 'fake_role_user')
+        self._mock_tenant_create('1234', 'fake_role_project')
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_system') as user_mock:
+            role_creds = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='system')
+        calls = user_mock.mock_calls
+        # Assert that the role creation is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        # Verify Scope
+        self.assertEqual(role_creds.system, 'all')
+
+        # Fetch the same role creds but with different scope
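+        # Creds are cached per requested roles and scope, so a different scope
+        # should provision a fresh set of creds.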
+        with mock.patch.object(self.roles_client.RolesClient,
+                               'create_user_role_on_project') as user_mock1:
+            role_creds_new = creds.get_creds_by_roles(
+                roles=['role1', 'role2'], scope='project')
+        calls = user_mock1.mock_calls
+        # Because scope is different, assert that the role creation
+        # is called with the 2 specified roles
+        self.assertEqual(len(calls), 2)
+        # Verify Scope
+        self.assertIsNone(role_creds_new.system)
+        # Check that created creds are different
+        self.assertNotEqual(role_creds, role_creds_new)
+
+    @mock.patch('tempest.lib.common.rest_client.RestClient')
     def test_member_role_creation_with_duplicate(self, rest_client_mock):
         creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
         creds.creds_client = mock.MagicMock()
diff --git a/tempest/tests/lib/common/test_preprov_creds.py b/tempest/tests/lib/common/test_preprov_creds.py
index 579363e..f2131dc 100644
--- a/tempest/tests/lib/common/test_preprov_creds.py
+++ b/tempest/tests/lib/common/test_preprov_creds.py
@@ -12,17 +12,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import hashlib
 import os
 import shutil
 from unittest import mock
 
-import six
 import testtools
 
 import fixtures
 from oslo_concurrency.fixture import lockutils as lockutils_fixtures
 from oslo_config import cfg
+from oslo_utils.secretutils import md5
 
 from tempest import config
 from tempest.lib import auth
@@ -106,10 +105,10 @@
         hash_fields = (
             preprov_creds.PreProvisionedCredentialProvider.HASH_CRED_FIELDS)
         for account in accounts_list:
-            hash = hashlib.md5()
+            hash = md5(usedforsecurity=False)
             account_for_hash = dict((k, v) for (k, v) in account.items()
                                     if k in hash_fields)
-            hash.update(six.text_type(account_for_hash).encode('utf-8'))
+            hash.update(str(account_for_hash).encode('utf-8'))
             temp_hash = hash.hexdigest()
             hash_list.append(temp_hash)
         return hash_list
@@ -145,8 +144,7 @@
         # Emulate the lock existing on the filesystem
         self.useFixture(fixtures.MockPatch(
             'os.path.isfile', return_value=True))
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
-                        create=True):
+        with mock.patch('builtins.open', mock.mock_open(), create=True):
             test_account_class = (
                 preprov_creds.PreProvisionedCredentialProvider(
                     **self.fixed_params))
@@ -158,8 +156,7 @@
         # Emulate the lock not existing on the filesystem
         self.useFixture(fixtures.MockPatch(
             'os.path.isfile', return_value=False))
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
-                        create=True):
+        with mock.patch('builtins.open', mock.mock_open(), create=True):
             test_account_class = (
                 preprov_creds.PreProvisionedCredentialProvider(
                     **self.fixed_params))
@@ -178,7 +175,7 @@
             'os.path.isfile', return_value=False))
         test_account_class = preprov_creds.PreProvisionedCredentialProvider(
             **self.fixed_params)
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
+        with mock.patch('builtins.open', mock.mock_open(),
                         create=True) as open_mock:
             test_account_class._get_free_hash(hash_list)
             lock_path = os.path.join(self.fixed_params['accounts_lock_dir'],
@@ -197,8 +194,7 @@
             'os.path.isfile', return_value=True))
         test_account_class = preprov_creds.PreProvisionedCredentialProvider(
             **self.fixed_params)
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
-                        create=True):
+        with mock.patch('builtins.open', mock.mock_open(), create=True):
             self.assertRaises(lib_exc.InvalidCredentials,
                               test_account_class._get_free_hash, hash_list)
 
@@ -218,7 +214,7 @@
             return True
 
         self.patchobject(os.path, 'isfile', _fake_is_file)
-        with mock.patch('six.moves.builtins.open', mock.mock_open(),
+        with mock.patch('builtins.open', mock.mock_open(),
                         create=True) as open_mock:
             test_account_class._get_free_hash(hash_list)
             lock_path = os.path.join(self.fixed_params['accounts_lock_dir'],
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index b861582..c5f6d7a 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -17,7 +17,6 @@
 import fixtures
 import jsonschema
 from oslo_serialization import jsonutils as json
-import six
 
 from tempest.lib.common import http
 from tempest.lib.common import rest_client
@@ -93,7 +92,7 @@
 class TestRestClientHeadersJSON(TestRestClientHTTPMethods):
 
     def _verify_headers(self, resp):
-        resp = dict((k.lower(), v) for k, v in six.iteritems(resp))
+        resp = dict((k.lower(), v) for k, v in resp.items())
         self.assertEqual(self.header_value, resp['accept'])
         self.assertEqual(self.header_value, resp['content-type'])
 
@@ -526,9 +525,11 @@
                           self.rest_client.wait_for_resource_deletion,
                           '1234')
 
-        # time.time() should be called twice, first to start the timer
-        # and then to compute the timedelta
-        self.assertEqual(2, time_mock.call_count)
+        # time.time() should be called 4 times,
+        # 1. Start timer
+        # 2. End timer
+        # 3 & 4. To generate timeout exception message
+        self.assertEqual(4, time_mock.call_count)
 
     def test_wait_for_deletion_with_unimplemented_deleted_method(self):
         self.rest_client.is_resource_deleted = self.original_deleted_method
diff --git a/tempest/tests/lib/common/utils/test_test_utils.py b/tempest/tests/lib/common/utils/test_test_utils.py
index bdc0ea4..d8e3745 100644
--- a/tempest/tests/lib/common/utils/test_test_utils.py
+++ b/tempest/tests/lib/common/utils/test_test_utils.py
@@ -74,6 +74,17 @@
         self.assertRaises(ValueError, test_utils.call_and_ignore_notfound_exc,
                           raise_value_error)
 
+    def test_call_and_ignore_notfound_exc_when_serverfault_raised(self):
+        calls = []
+
+        def raise_serverfault():
+            calls.append('call')
+            raise exceptions.ServerFault()
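+        # call_and_ignore_notfound_exc retries on ServerFault a limited number
+        # of times (three calls here) before letting the exception propagate.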
+        self.assertRaises(exceptions.ServerFault,
+                          test_utils.call_and_ignore_notfound_exc,
+                          raise_serverfault)
+        self.assertEqual(3, len(calls))
+
     def test_call_and_ignore_notfound_exc(self):
         m = mock.Mock(return_value=42)
         args, kwargs = (1,), {'1': None}
diff --git a/tempest/tests/lib/services/compute/test_aggregates_client.py b/tempest/tests/lib/services/compute/test_aggregates_client.py
index 674d92a..1448a4d 100644
--- a/tempest/tests/lib/services/compute/test_aggregates_client.py
+++ b/tempest/tests/lib/services/compute/test_aggregates_client.py
@@ -37,7 +37,7 @@
     FAKE_CREATE_AGGREGATE = {
         "aggregate":
         {
-            "name": u'\xf4',
+            "name": '\xf4',
             "availability_zone": None,
             "deleted": False,
             "created_at": "2015-07-21T04:11:18.000000",
@@ -50,7 +50,7 @@
     FAKE_UPDATE_AGGREGATE = {
         "aggregate":
         {
-            "name": u'\xe9',
+            "name": '\xe9',
             "availability_zone": None,
             "deleted": False,
             "created_at": "2015-07-16T03:07:32.000000",
@@ -74,7 +74,7 @@
         "metadata": {
             "availability_zone": "nova"
         },
-        "name": u'\xe9',
+        "name": '\xe9',
         "updated_at": None
     }
 
diff --git a/tempest/tests/lib/services/compute/test_assisted_volume_snapshots_client.py b/tempest/tests/lib/services/compute/test_assisted_volume_snapshots_client.py
new file mode 100644
index 0000000..79855ea
--- /dev/null
+++ b/tempest/tests/lib/services/compute/test_assisted_volume_snapshots_client.py
@@ -0,0 +1,53 @@
+# Copyright 2017 AT&T.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.compute import assisted_volume_snapshots_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestVolumesClient(base.BaseServiceTest):
+
+    FAKE_SNAPSHOT = {
+        "id": "bf7b810c-70df-4c64-88a7-8588f7a6739c",
+        "volumeId": "59f17c4f-66d4-4271-be40-f200523423a9"
+    }
+
+    def setUp(self):
+        super(TestVolumesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = assisted_volume_snapshots_client.\
+            AssistedVolumeSnapshotsClient(fake_auth, 'compute', 'regionOne')
+
+    def _test_create_assisted_volume_snapshot(self, bytes_body=False):
+        kwargs = {"type": "qcow2", "new_file": "fake_name"}
+        self.check_service_client_function(
+            self.client.create_assisted_volume_snapshot,
+            'tempest.lib.common.rest_client.RestClient.post',
+            {"snapshot": self.FAKE_SNAPSHOT},
+            bytes_body, status=200, volume_id=self.FAKE_SNAPSHOT['volumeId'],
+            snapshot_id=self.FAKE_SNAPSHOT['id'], **kwargs)
+
+    def test_create_assisted_volume_snapshot_with_str_body(self):
+        self._test_create_assisted_volume_snapshot()
+
+    def test_create_assisted_volume_snapshot_with_byte_body(self):
+        self._test_create_assisted_volume_snapshot(bytes_body=True)
+
+    def test_delete_assisted_volume_snapshot(self):
+        self.check_service_client_function(
+            self.client.delete_assisted_volume_snapshot,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {}, status=204, volume_id=self.FAKE_SNAPSHOT['volumeId'],
+            snapshot_id=self.FAKE_SNAPSHOT['id'])
diff --git a/tempest/tests/lib/services/compute/test_availability_zone_client.py b/tempest/tests/lib/services/compute/test_availability_zone_client.py
index 6608592..aef98fc 100644
--- a/tempest/tests/lib/services/compute/test_availability_zone_client.py
+++ b/tempest/tests/lib/services/compute/test_availability_zone_client.py
@@ -27,7 +27,7 @@
                     "available": True
                 },
                 "hosts": None,
-                "zoneName": u'\xf4'
+                "zoneName": '\xf4'
             }
         ]
     }
diff --git a/tempest/tests/lib/services/compute/test_extensions_client.py b/tempest/tests/lib/services/compute/test_extensions_client.py
index d7e217e..053b84c 100644
--- a/tempest/tests/lib/services/compute/test_extensions_client.py
+++ b/tempest/tests/lib/services/compute/test_extensions_client.py
@@ -27,7 +27,7 @@
             "namespace":
             "http://docs.openstack.org/compute/ext/multinic/api/v1.1",
             "alias": "NMN",
-            "description": u'\u2740(*\xb4\u25e1`*)\u2740'
+            "description": '\u2740(*\xb4\u25e1`*)\u2740'
         }
     }
 
diff --git a/tempest/tests/lib/services/compute/test_floating_ip_pools_client.py b/tempest/tests/lib/services/compute/test_floating_ip_pools_client.py
index b0c00f0..6278df4 100644
--- a/tempest/tests/lib/services/compute/test_floating_ip_pools_client.py
+++ b/tempest/tests/lib/services/compute/test_floating_ip_pools_client.py
@@ -22,8 +22,8 @@
     FAKE_FLOATING_IP_POOLS = {
         "floating_ip_pools":
         [
-            {"name": u'\u3042'},
-            {"name": u'\u3044'}
+            {"name": '\u3042'},
+            {"name": '\u3044'}
         ]
     }
 
diff --git a/tempest/tests/lib/services/compute/test_keypairs_client.py b/tempest/tests/lib/services/compute/test_keypairs_client.py
index ed3b9dd..214d0e5 100644
--- a/tempest/tests/lib/services/compute/test_keypairs_client.py
+++ b/tempest/tests/lib/services/compute/test_keypairs_client.py
@@ -23,7 +23,7 @@
 
     FAKE_KEYPAIR = {"keypair": {
         "public_key": "ssh-rsa foo Generated-by-Nova",
-        "name": u'\u2740(*\xb4\u25e1`*)\u2740',
+        "name": '\u2740(*\xb4\u25e1`*)\u2740',
         "user_id": "525d55f98980415ba98e634972fa4a10",
         "fingerprint": "76:24:66:49:d7:ca:6e:5c:77:ea:8e:bb:9c:15:5f:98"
         }}
diff --git a/tempest/tests/lib/services/compute/test_networks_client.py b/tempest/tests/lib/services/compute/test_networks_client.py
index 1908b57..5a88671 100644
--- a/tempest/tests/lib/services/compute/test_networks_client.py
+++ b/tempest/tests/lib/services/compute/test_networks_client.py
@@ -31,7 +31,7 @@
         "deleted_at": None,
         "gateway": None,
         "rxtx_base": None,
-        "label": u'30d7',
+        "label": '30d7',
         "priority": None,
         "project_id": None,
         "vpn_private_address": None,
diff --git a/tempest/tests/lib/services/compute/test_quota_classes_client.py b/tempest/tests/lib/services/compute/test_quota_classes_client.py
index 22d8b91..6921365 100644
--- a/tempest/tests/lib/services/compute/test_quota_classes_client.py
+++ b/tempest/tests/lib/services/compute/test_quota_classes_client.py
@@ -29,7 +29,7 @@
         "ram": 51200,
         "floating_ips": 10,
         "key_pairs": 100,
-        "id": u'\u2740(*\xb4\u25e1`*)\u2740',
+        "id": '\u2740(*\xb4\u25e1`*)\u2740',
         "instances": 10,
         "security_group_rules": 20,
         "security_groups": 10,
diff --git a/tempest/tests/lib/services/compute/test_tenant_networks_client.py b/tempest/tests/lib/services/compute/test_tenant_networks_client.py
index f71aad9..a042a1a 100644
--- a/tempest/tests/lib/services/compute/test_tenant_networks_client.py
+++ b/tempest/tests/lib/services/compute/test_tenant_networks_client.py
@@ -22,7 +22,7 @@
     FAKE_NETWORK = {
         "cidr": "None",
         "id": "c2329eb4-cc8e-4439-ac4c-932369309e36",
-        "label": u'\u30d7'
+        "label": '\u30d7'
         }
 
     FAKE_NETWORKS = [FAKE_NETWORK]
diff --git a/tempest/tests/lib/services/identity/v3/test_access_rules_client.py b/tempest/tests/lib/services/identity/v3/test_access_rules_client.py
new file mode 100644
index 0000000..71c9cde
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_access_rules_client.py
@@ -0,0 +1,97 @@
+# Copyright 2019 SUSE LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import access_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestAccessRulesClient(base.BaseServiceTest):
+    FAKE_LIST_ACCESS_RULES = {
+        "links": {
+            "self": "https://example.com/identity/v3/users/" +
+                    "3e0716ae/access_rules",
+            "previous": None,
+            "next": None
+        },
+        "access_rules": [
+            {
+                "path": "/v2.0/metrics",
+                "links": {
+                    "self": "https://example.com/identity/v3/access_rules/" +
+                            "07d719df00f349ef8de77d542edf010c"
+                },
+                "id": "07d719df00f349ef8de77d542edf010c",
+                "service": "monitoring",
+                "method": "GET"
+            }
+        ]
+    }
+
+    FAKE_ACCESS_RULE_INFO = {
+        "access_rule": {
+            "path": "/v2.0/metrics",
+            "links": {
+                "self": "https://example.com/identity/v3/access_rules/" +
+                        "07d719df00f349ef8de77d542edf010c"
+            },
+            "id": "07d719df00f349ef8de77d542edf010c",
+            "service": "monitoring",
+            "method": "GET"
+        }
+    }
+
+    def setUp(self):
+        super(TestAccessRulesClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = access_rules_client.AccessRulesClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_show_access_rule(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_access_rule,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ACCESS_RULE_INFO,
+            bytes_body,
+            user_id="123456",
+            access_rule_id="5499a186")
+
+    def _test_list_access_rules(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_access_rules,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ACCESS_RULES,
+            bytes_body,
+            user_id="123456")
+
+    def test_show_access_rule_with_str_body(self):
+        self._test_show_access_rule()
+
+    def test_show_access_rule_with_bytes_body(self):
+        self._test_show_access_rule(bytes_body=True)
+
+    def test_list_access_rule_with_str_body(self):
+        self._test_list_access_rules()
+
+    def test_list_access_rule_with_bytes_body(self):
+        self._test_list_access_rules(bytes_body=True)
+
+    def test_delete_access_rule(self):
+        self.check_service_client_function(
+            self.client.delete_access_rule,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            user_id="123456",
+            access_rule_id="5499a186",
+            status=204)
diff --git a/tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py b/tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py
index 7faf6a0..e5f7a66 100644
--- a/tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_endpoint_filter_client.py
@@ -83,6 +83,36 @@
         }
     }
 
+    FAKE_LIST_ENDPOINT_GROUPS_FOR_PROJECT = {
+        "endpoint_groups": [
+            {
+                "endpoint_group": {
+                    "description": "endpoint group description #2",
+                    "filters": {
+                        "interface": "admin"
+                    },
+                    "id": "3de68c",
+                    "name": "endpoint group name #2"
+                }
+            }
+            ],
+        "links": {
+            "self": "https://url/identity/v3/OS-EP-FILTER/endpoint_groups",
+        }
+    }
+
+    FAKE_PROJECT_INFO = {
+        "project": {
+            "domain_id": "1789d1",
+            "id": "263fd9",
+            "links": {
+                "self": "http://example.com/identity/v3/projects/263fd9"
+            },
+            "name": "project name #1",
+            "description": "project description #1"
+        }
+    }
+
     def setUp(self):
         super(TestEndPointsFilterClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -137,6 +167,52 @@
             project_id=3,
             endpoint_id=4)
 
+    def _test_list_endpoint_groups_for_project(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_endpoint_groups_for_project,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ENDPOINT_GROUPS_FOR_PROJECT,
+            bytes_body,
+            status=200,
+            project_id=3)
+
+    def _test_list_projects_for_endpoint_group(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_projects_for_endpoint_group,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_PROJECTS_FOR_ENDPOINTS,
+            bytes_body,
+            status=200,
+            endpoint_group_id=5)
+
+    def _test_list_endpoints_for_endpoint_group(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_endpoints_for_endpoint_group,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ENDPOINTS_FOR_PROJECTS,
+            bytes_body,
+            status=200,
+            endpoint_group_id=5)
+
+    def _test_add_endpoint_group_to_project(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.add_endpoint_group_to_project,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            bytes_body,
+            status=204,
+            endpoint_group_id=5,
+            project_id=6)
+
+    def _test_show_endpoint_group_for_project(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_endpoint_group_for_project,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_PROJECT_INFO,
+            bytes_body,
+            endpoint_group_id=5,
+            project_id=6)
+
     def test_add_endpoint_to_project_with_str_body(self):
         self._test_add_endpoint_to_project()
 
@@ -163,3 +239,43 @@
 
     def test_delete_endpoint_from_project(self):
         self._test_delete_endpoint_from_project()
+
+    def test_list_endpoint_groups_for_project_with_str_body(self):
+        self._test_list_endpoint_groups_for_project()
+
+    def test_list_endpoint_groups_for_project_with_bytes_body(self):
+        self._test_list_endpoint_groups_for_project(bytes_body=True)
+
+    def test_list_projects_for_endpoint_group_with_str_body(self):
+        self._test_list_projects_for_endpoint_group()
+
+    def test_list_projects_for_endpoint_group_with_bytes_body(self):
+        self._test_list_projects_for_endpoint_group(bytes_body=True)
+
+    def test_list_endpoints_for_endpoint_group_with_str_body(self):
+        self._test_list_endpoints_for_endpoint_group()
+
+    def test_list_endpoints_for_endpoint_group_with_bytes_body(self):
+        self._test_list_endpoints_for_endpoint_group(bytes_body=True)
+
+    def test_add_endpoint_group_to_project_with_str_body(self):
+        self._test_add_endpoint_group_to_project()
+
+    def test_add_endpoint_group_to_project_with_bytes_body(self):
+        self._test_add_endpoint_group_to_project(bytes_body=True)
+
+    def test_show_endpoint_group_for_project_with_str_body(self):
+        self._test_show_endpoint_group_for_project()
+
+    def test_show_endpoint_group_for_project_with_bytes_body(self):
+        self._test_show_endpoint_group_for_project(bytes_body=True)
+
+    def test_delete_endpoint_group_from_project(self):
+        self.check_service_client_function(
+            self.client.delete_endpoint_group_from_project,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            False,
+            status=204,
+            endpoint_group_id=5,
+            project_id=5)
diff --git a/tempest/tests/lib/services/identity/v3/test_endpoints_client.py b/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
index ca15dd1..0efc462 100644
--- a/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
@@ -54,12 +54,44 @@
     }
 
     FAKE_SERVICE_ID = "a4dc5060-f757-4662-b658-edd2aefbb41d"
+    FAKE_ENDPOINT_ID = "b335d394-cdb9-4519-b95d-160b7706e54ew"
+
+    FAKE_UPDATE_ENDPOINT = {
+        "endpoint": {
+            "id": "828384",
+            "interface": "internal",
+            "links": {
+                "self": "http://example.com/identity/v3/"
+                        "endpoints/828384"
+            },
+            "region_id": "north",
+            "service_id": "686766",
+            "url": "http://example.com/identity/v3/"
+                   "endpoints/828384"
+        }
+    }
+
+    FAKE_SHOW_ENDPOINT = {
+        "endpoint": {
+            "enabled": True,
+            "id": "01c3d5b92f7841ac83fb4b26173c12c7",
+            "interface": "admin",
+            "links": {
+                "self": "http://example.com/identity/v3/"
+                        "endpoints/828384"
+            },
+            "region": "RegionOne",
+            "region_id": "RegionOne",
+            "service_id": "3b2d6ad7e02c4cde8498a547601f1b8f",
+            "url": "http://23.253.211.234:9696/"
+        }
+    }
 
     def setUp(self):
         super(TestEndpointsClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = endpoints_client.EndPointsClient(fake_auth,
-                                                       'identity', 'regionOne')
+        self.client = endpoints_client.EndPointsClient(
+            fake_auth, 'identity', 'regionOne')
 
     def _test_create_endpoint(self, bytes_body=False):
         self.check_service_client_function(
@@ -84,6 +116,38 @@
             mock_args=[mock_args],
             **params)
 
+    def _test_update_endpoint(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_endpoint,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_UPDATE_ENDPOINT,
+            bytes_body,
+            endpoint_id=self.FAKE_ENDPOINT_ID,
+            interface="public",
+            region_id="north",
+            url="http://example.com/identity/v3/endpoints/828384",
+            service_id=self.FAKE_SERVICE_ID)
+
+    def _test_show_endpoint(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_endpoint,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_ENDPOINT,
+            bytes_body,
+            endpoint_id="3456")
+
+    def test_update_endpoint_with_str_body(self):
+        self._test_update_endpoint()
+
+    def test_update_endpoint_with_bytes_body(self):
+        self._test_update_endpoint(bytes_body=True)
+
+    def test_show_endpoint_with_str_body(self):
+        self._test_show_endpoint()
+
+    def test_show_endpoint_with_bytes_body(self):
+        self._test_show_endpoint(bytes_body=True)
+
     def test_create_endpoint_with_str_body(self):
         self._test_create_endpoint()
 
diff --git a/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py b/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py
new file mode 100644
index 0000000..964c51b
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py
@@ -0,0 +1,142 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import identity_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestIdentityProvidersClient(base.BaseServiceTest):
+    FAKE_IDENTITY_PROVIDERS_INFO = {
+        "identity_providers": [
+            {
+                "domain_id": "FAKE_DOMAIN_ID",
+                "description": "FAKE IDENTITY PROVIDER",
+                "remote_ids": ["fake_id_1", "fake_id_2"],
+                "enabled": True,
+                "id": "FAKE_ID",
+                "links": {
+                    "protocols": "http://example.com/identity/v3/" +
+                                 "OS-FEDERATION/identity_providers/" +
+                                 "FAKE_ID/protocols",
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                            "identity_providers/FAKE_ID"
+                }
+            }
+        ],
+        "links": {
+            "next": None,
+            "previous": None,
+            "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                    "identity_providers"
+        }
+    }
+
+    FAKE_IDENTITY_PROVIDER_INFO = {
+        "identity_provider": {
+            "authorization_ttl": None,
+            "domain_id": "FAKE_DOMAIN_ID",
+            "description": "FAKE IDENTITY PROVIDER",
+            "remote_ids": ["fake_id_1", "fake_id_2"],
+            "enabled": True,
+            "id": "ACME",
+            "links": {
+                "protocols": "http://example.com/identity/v3/OS-FEDERATION/" +
+                             "identity_providers/FAKE_ID/protocols",
+                "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                        "identity_providers/FAKE_ID"
+            }
+        }
+    }
+
+    def setUp(self):
+        super(TestIdentityProvidersClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = identity_providers_client.IdentityProvidersClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_register_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.register_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_IDENTITY_PROVIDER_INFO,
+            bytes_body,
+            identity_provider_id="FAKE_ID",
+            status=201)
+
+    def _test_list_identity_providers(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_identity_providers,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_IDENTITY_PROVIDERS_INFO,
+            bytes_body,
+            status=200)
+
+    def _test_get_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.get_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_IDENTITY_PROVIDER_INFO,
+            bytes_body,
+            identity_provider_id="FAKE_ID",
+            status=200)
+
+    def _test_delete_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.delete_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            bytes_body,
+            identity_provider_id="FAKE_ID",
+            status=204)
+
+    def _test_update_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_IDENTITY_PROVIDER_INFO,
+            bytes_body,
+            identity_provider_id="FAKE_ID",
+            status=200)
+
+    def test_register_identity_provider_with_str_body(self):
+        self._test_register_identity_provider()
+
+    def test_register_identity_provider_with_bytes_body(self):
+        self._test_register_identity_provider(bytes_body=True)
+
+    def test_list_identity_providers_with_str_body(self):
+        self._test_list_identity_providers()
+
+    def test_list_identity_providers_with_bytes_body(self):
+        self._test_list_identity_providers(bytes_body=True)
+
+    def test_get_identity_provider_with_str_body(self):
+        self._test_get_identity_provider()
+
+    def test_get_identity_provider_with_bytes_body(self):
+        self._test_get_identity_provider(bytes_body=True)
+
+    def test_delete_identity_provider_with_str_body(self):
+        self._test_delete_identity_provider()
+
+    def test_delete_identity_provider_with_bytes_body(self):
+        self._test_delete_identity_provider(bytes_body=True)
+
+    def test_update_identity_provider_with_str_body(self):
+        self._test_update_identity_provider()
+
+    def test_update_identity_provider_with_bytes_body(self):
+        self._test_update_identity_provider(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_mappings_client.py b/tempest/tests/lib/services/identity/v3/test_mappings_client.py
new file mode 100644
index 0000000..845a3f9
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_mappings_client.py
@@ -0,0 +1,183 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import mappings_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestMappingsClient(base.BaseServiceTest):
+    FAKE_MAPPING_INFO = {
+        "mapping": {
+            "id": "fake123",
+            "links": {
+                "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                        "mappings/fake123"
+            },
+            "rules": [
+                {
+                    "local": [
+                        {
+                            "user": {
+                                "name": "{0}"
+                            }
+                        },
+                        {
+                            "group": {
+                                "id": "0cd5e9"
+                            }
+                        }
+                    ],
+                    "remote": [
+                        {
+                            "type": "UserName"
+                        },
+                        {
+                            "type": "orgPersonType",
+                            "not_any_of": [
+                                "Contractor",
+                                "Guest"
+                            ]
+                        }
+                    ]
+                }
+            ]
+        }
+    }
+
+    FAKE_MAPPINGS_INFO = {
+        "links": {
+            "next": None,
+            "previous": None,
+            "self": "http://example.com/identity/v3/OS-FEDERATION/mappings"
+        },
+        "mappings": [
+            {
+                "id": "fake123",
+                "links": {
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                            "mappings/fake123"
+                },
+                "rules": [
+                    {
+                        "local": [
+                            {
+                                "user": {
+                                    "name": "{0}"
+                                }
+                            },
+                            {
+                                "group": {
+                                    "id": "0cd5e9"
+                                }
+                            }
+                        ],
+                        "remote": [
+                            {
+                                "type": "UserName"
+                            },
+                            {
+                                "type": "orgPersonType",
+                                "any_one_of": [
+                                    "Contractor",
+                                    "SubContractor"
+                                ]
+                            }
+                        ]
+                    }
+                ]
+            }
+        ]
+    }
+
+    def setUp(self):
+        super(TestMappingsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = mappings_client.MappingsClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_create_mapping(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_mapping,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_MAPPING_INFO,
+            bytes_body,
+            mapping_id="fake123",
+            status=201)
+
+    def _test_get_mapping(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.get_mapping,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_MAPPING_INFO,
+            bytes_body,
+            mapping_id="fake123",
+            status=200)
+
+    def _test_update_mapping(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_mapping,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_MAPPING_INFO,
+            bytes_body,
+            mapping_id="fake123",
+            status=200)
+
+    def _test_list_mappings(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_mappings,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_MAPPINGS_INFO,
+            bytes_body,
+            status=200)
+
+    def _test_delete_mapping(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.delete_mapping,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            bytes_body,
+            mapping_id="fake123",
+            status=204)
+
+    def test_create_mapping_with_str_body(self):
+        self._test_create_mapping()
+
+    def test_create_mapping_with_bytes_body(self):
+        self._test_create_mapping(bytes_body=True)
+
+    def test_get_mapping_with_str_body(self):
+        self._test_get_mapping()
+
+    def test_get_mapping_with_bytes_body(self):
+        self._test_get_mapping(bytes_body=True)
+
+    def test_update_mapping_with_str_body(self):
+        self._test_update_mapping()
+
+    def test_update_mapping_with_bytes_body(self):
+        self._test_update_mapping(bytes_body=True)
+
+    def test_list_mappings_with_str_body(self):
+        self._test_list_mappings()
+
+    def test_list_mappings_with_bytes_body(self):
+        self._test_list_mappings(bytes_body=True)
+
+    def test_delete_mapping_with_str_body(self):
+        self._test_delete_mapping()
+
+    def test_delete_mapping_with_bytes_body(self):
+        self._test_delete_mapping(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_policies_client.py b/tempest/tests/lib/services/identity/v3/test_policies_client.py
index 0237475..4fc800a 100644
--- a/tempest/tests/lib/services/identity/v3/test_policies_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_policies_client.py
@@ -44,6 +44,34 @@
             }
         }
 
+    FAKE_ENDPOINT_INFO = {
+        "endpoints": [
+            {
+                "id": "1",
+                "interface": "public",
+                "links": {
+                    "self": "http://example.com/identity/v3/endpoints/1"
+                },
+                "region": "north",
+                "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596",
+                "url": "http://example.com/identity/"
+            },
+            {
+                "id": "1",
+                "interface": "internal",
+                "links": {
+                    "self": "http://example.com/identity/v3/endpoints/1"
+                },
+                "region": "south",
+                "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596",
+                "url": "http://example.com/identity/"
+            }
+        ],
+        "links": {
+            "self": "http://exmp.com/identity/v3/OS-ENDPOINT-POLICY/policies/1"
+        }
+    }
+
     FAKE_LIST_POLICIES = {
         "links": {
             "next": None,
@@ -238,3 +266,33 @@
             service_id=self.FAKE_SERVICE_ID,
             region_id=self.FAKE_REGION_ID,
             status=204)
+
+    def _test_list_endpoints_for_policy(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_endpoints_for_policy,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ENDPOINT_INFO,
+            bytes_body,
+            policy_id=self.FAKE_POLICY_ID,
+            status=200)
+
+    def test_list_endpoints_for_policy_with_str_body(self):
+        self._test_list_endpoints_for_policy()
+
+    def test_list_endpoints_for_policy_with_bytes_body(self):
+        self._test_list_endpoints_for_policy(bytes_body=True)
+
+    def _test_list_policy_for_endpoint(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_policy_for_endpoint,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_POLICY_INFO,
+            bytes_body,
+            endpoint_id=self.FAKE_ENDPOINT_ID,
+            status=200)
+
+    def test_list_policy_for_endpoint_with_str_body(self):
+        self._test_list_policy_for_endpoint()
+
+    def test_list_policy_for_endpoint_with_bytes_body(self):
+        self._test_list_policy_for_endpoint(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_protocols_client.py b/tempest/tests/lib/services/identity/v3/test_protocols_client.py
new file mode 100644
index 0000000..c1d04f4
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_protocols_client.py
@@ -0,0 +1,140 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import protocols_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestProtocolsClient(base.BaseServiceTest):
+    FAKE_PROTOCOLS_INFO = {
+        "links": {
+            "next": None,
+            "previous": None,
+            "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                    "identity_providers/FAKE_ID/protocols"
+        },
+        "protocols": [
+            {
+                "id": "fake_id1",
+                "links": {
+                    "identity_provider": "http://example.com/identity/v3/" +
+                                         "OS-FEDERATION/identity_providers/" +
+                                         "FAKE_ID",
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/"
+                            "identity_providers/FAKE_ID/protocols/fake_id1"
+                },
+                "mapping_id": "fake123"
+            }
+        ]
+    }
+
+    FAKE_PROTOCOL_INFO = {
+        "protocol": {
+            "id": "fake_id1",
+            "links": {
+                "identity_provider": "http://example.com/identity/v3/OS-" +
+                                     "FEDERATION/identity_providers/FAKE_ID",
+                "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                        "identity_providers/FAKE_ID/protocols/fake_id1"
+            },
+            "mapping_id": "fake123"
+        }
+    }
+
+    def setUp(self):
+        super(TestProtocolsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = protocols_client.ProtocolsClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_add_protocol_to_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.add_protocol_to_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_PROTOCOL_INFO,
+            bytes_body,
+            idp_id="FAKE_ID",
+            protocol_id="fake_id1",
+            status=201)
+
+    def _test_list_protocols_of_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_protocols_of_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_PROTOCOLS_INFO,
+            bytes_body,
+            idp_id="FAKE_ID",
+            status=200)
+
+    def _test_get_protocol_for_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.get_protocol_for_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_PROTOCOL_INFO,
+            bytes_body,
+            idp_id="FAKE_ID",
+            protocol_id="fake_id1",
+            status=200)
+
+    def _test_update_mapping_for_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_mapping_for_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_PROTOCOL_INFO,
+            bytes_body,
+            idp_id="FAKE_ID",
+            protocol_id="fake_id1",
+            status=200)
+
+    def _test_delete_protocol_from_identity_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.delete_protocol_from_identity_provider,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            bytes_body,
+            idp_id="FAKE_ID",
+            protocol_id="fake_id1",
+            status=204)
+
+    def test_add_protocol_to_identity_provider_with_str_body(self):
+        self._test_add_protocol_to_identity_provider()
+
+    def test_add_protocol_to_identity_provider_with_bytes_body(self):
+        self._test_add_protocol_to_identity_provider(bytes_body=True)
+
+    def test_list_protocols_of_identity_provider_with_str_body(self):
+        self._test_list_protocols_of_identity_provider()
+
+    def test_list_protocols_of_identity_provider_with_bytes_body(self):
+        self._test_list_protocols_of_identity_provider(bytes_body=True)
+
+    def test_get_protocol_for_identity_provider_with_str_body(self):
+        self._test_get_protocol_for_identity_provider()
+
+    def test_get_protocol_for_identity_provider_with_bytes_body(self):
+        self._test_get_protocol_for_identity_provider(bytes_body=True)
+
+    def test_update_mapping_for_identity_provider_with_str_body(self):
+        self._test_update_mapping_for_identity_provider()
+
+    def test_update_mapping_for_identity_provider_with_bytes_body(self):
+        self._test_update_mapping_for_identity_provider(bytes_body=True)
+
+    def test_delete_protocol_from_identity_provider_with_str_body(self):
+        self._test_delete_protocol_from_identity_provider()
+
+    def test_delete_protocol_from_identity_provider_with_bytes_body(self):
+        self._test_delete_protocol_from_identity_provider(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_roles_client.py b/tempest/tests/lib/services/identity/v3/test_roles_client.py
index 8d6bb42..e963310 100644
--- a/tempest/tests/lib/services/identity/v3/test_roles_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_roles_client.py
@@ -225,6 +225,16 @@
             role_id="1234",
             status=204)
 
+    def _test_create_user_role_on_system(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_user_role_on_system,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            bytes_body,
+            user_id="123",
+            role_id="1234",
+            status=204)
+
     def _test_list_user_roles_on_project(self, bytes_body=False):
         self.check_service_client_function(
             self.client.list_user_roles_on_project,
@@ -243,6 +253,14 @@
             domain_id="b344506af7644f6794d9cb316600b020",
             user_id="123")
 
+    def _test_list_user_roles_on_system(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_user_roles_on_system,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ROLES,
+            bytes_body,
+            user_id="123")
+
     def _test_create_group_role_on_project(self, bytes_body=False):
         self.check_service_client_function(
             self.client.create_group_role_on_project,
@@ -265,6 +283,16 @@
             role_id="1234",
             status=204)
 
+    def _test_create_group_role_on_system(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_group_role_on_system,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            bytes_body,
+            group_id="123",
+            role_id="1234",
+            status=204)
+
     def _test_list_group_roles_on_project(self, bytes_body=False):
         self.check_service_client_function(
             self.client.list_group_roles_on_project,
@@ -283,6 +311,15 @@
             domain_id="b344506af7644f6794d9cb316600b020",
             group_id="123")
 
+    def _test_list_group_roles_on_system(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_group_roles_on_system,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_ROLES,
+            bytes_body,
+            domain_id="b344506af7644f6794d9cb316600b020",
+            group_id="123")
+
     def _test_create_role_inference_rule(self, bytes_body=False):
         self.check_service_client_function(
             self.client.create_role_inference_rule,
@@ -405,6 +442,15 @@
             role_id="1234",
             status=204)
 
+    def test_delete_role_from_user_on_system(self):
+        self.check_service_client_function(
+            self.client.delete_role_from_user_on_system,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            user_id="123",
+            role_id="1234",
+            status=204)
+
     def test_delete_role_from_group_on_project(self):
         self.check_service_client_function(
             self.client.delete_role_from_group_on_project,
@@ -425,6 +471,15 @@
             role_id="1234",
             status=204)
 
+    def test_delete_role_from_group_on_system(self):
+        self.check_service_client_function(
+            self.client.delete_role_from_group_on_system,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            group_id="123",
+            role_id="1234",
+            status=204)
+
     def test_check_user_role_existence_on_project(self):
         self.check_service_client_function(
             self.client.check_user_role_existence_on_project,
@@ -445,6 +500,15 @@
             role_id="1234",
             status=204)
 
+    def test_check_user_role_existence_on_system(self):
+        self.check_service_client_function(
+            self.client.check_user_role_existence_on_system,
+            'tempest.lib.common.rest_client.RestClient.head',
+            {},
+            user_id="123",
+            role_id="1234",
+            status=204)
+
     def test_check_role_from_group_on_project_existence(self):
         self.check_service_client_function(
             self.client.check_role_from_group_on_project_existence,
@@ -465,6 +529,15 @@
             role_id="1234",
             status=204)
 
+    def test_check_role_from_group_on_system_existence(self):
+        self.check_service_client_function(
+            self.client.check_role_from_group_on_system_existence,
+            'tempest.lib.common.rest_client.RestClient.head',
+            {},
+            group_id="123",
+            role_id="1234",
+            status=204)
+
     def test_create_role_inference_rule_with_str_body(self):
         self._test_create_role_inference_rule()
 
diff --git a/tempest/tests/lib/services/identity/v3/test_service_providers_client.py b/tempest/tests/lib/services/identity/v3/test_service_providers_client.py
new file mode 100644
index 0000000..ec908bc
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_service_providers_client.py
@@ -0,0 +1,157 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import service_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestServiceProvidersClient(base.BaseServiceTest):
+    FAKE_SERVICE_PROVIDER_INFO = {
+        "service_provider": {
+            "auth_url": "https://example.com/identity/v3/OS-FEDERATION/" +
+                        "identity_providers/FAKE_ID/protocols/fake_id1/auth",
+            "description": "Fake Service Provider",
+            "enabled": True,
+            "id": "FAKE_ID",
+            "links": {
+                "self": "https://example.com/identity/v3/OS-FEDERATION/" +
+                        "service_providers/FAKE_ID"
+            },
+            "relay_state_prefix": "ss:mem:",
+            "sp_url": "https://example.com/identity/Shibboleth.sso/" +
+                      "FAKE_ID1/ECP"
+        }
+    }
+
+    FAKE_SERVICE_PROVIDERS_INFO = {
+        "links": {
+            "next": None,
+            "previous": None,
+            "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                    "service_providers"
+        },
+        "service_providers": [
+            {
+                "auth_url": "https://example.com/identity/v3/OS-FEDERATION/" +
+                            "identity_providers/acme/protocols/saml2/auth",
+                "description": "Stores ACME identities",
+                "enabled": True,
+                "id": "ACME",
+                "links": {
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                            "service_providers/ACME"
+                },
+                "relay_state_prefix": "ss:mem:",
+                "sp_url": "https://example.com/identity/Shibboleth.sso/" +
+                          "SAML2/ECP"
+            },
+            {
+                "auth_url": "https://other.example.com/identity/v3/" +
+                            "OS-FEDERATION/identity_providers/acme/" +
+                            "protocols/saml2/auth",
+                "description": "Stores contractor identities",
+                "enabled": False,
+                "id": "ACME-contractors",
+                "links": {
+                    "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+                            "service_providers/ACME-contractors"
+                },
+                "relay_state_prefix": "ss:mem:",
+                "sp_url": "https://other.example.com/identity/Shibboleth" +
+                          ".sso/SAML2/ECP"
+            }
+        ]
+    }
+
+    def setUp(self):
+        super(TestServiceProvidersClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = service_providers_client.ServiceProvidersClient(
+            fake_auth, 'identity', 'regionOne')
+
+    def _test_register_service_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.register_service_provider,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_SERVICE_PROVIDER_INFO,
+            bytes_body,
+            service_provider_id="FAKE_ID",
+            status=201)
+
+    def _test_list_service_providers(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_service_providers,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SERVICE_PROVIDERS_INFO,
+            bytes_body,
+            status=200)
+
+    def _test_get_service_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.get_service_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SERVICE_PROVIDER_INFO,
+            bytes_body,
+            service_provider_id="FAKE_ID",
+            status=200)
+
+    def _test_delete_service_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.delete_service_provider,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            bytes_body,
+            service_provider_id="FAKE_ID",
+            status=204)
+
+    def _test_update_service_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_service_provider,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_SERVICE_PROVIDER_INFO,
+            bytes_body,
+            service_provider_id="FAKE_ID",
+            status=200)
+
+    def test_register_service_provider_with_str_body(self):
+        self._test_register_service_provider()
+
+    def test_register_service_provider_with_bytes_body(self):
+        self._test_register_service_provider(bytes_body=True)
+
+    def test_list_service_providers_with_str_body(self):
+        self._test_list_service_providers()
+
+    def test_list_service_providers_with_bytes_body(self):
+        self._test_list_service_providers(bytes_body=True)
+
+    def test_get_service_provider_with_str_body(self):
+        self._test_get_service_provider()
+
+    def test_get_service_provider_with_bytes_body(self):
+        self._test_get_service_provider(bytes_body=True)
+
+    def test_delete_service_provider_with_str_body(self):
+        self._test_delete_service_provider()
+
+    def test_delete_service_provider_with_bytes_body(self):
+        self._test_delete_service_provider(bytes_body=True)
+
+    def test_update_service_provider_with_str_body(self):
+        self._test_update_service_provider()
+
+    def test_update_service_provider_with_bytes_body(self):
+        self._test_update_service_provider(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_trusts_client.py b/tempest/tests/lib/services/identity/v3/test_trusts_client.py
index a1ca020..33dca7d 100644
--- a/tempest/tests/lib/services/identity/v3/test_trusts_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_trusts_client.py
@@ -94,6 +94,35 @@
             }
         }
 
+    FAKE_LIST_TRUSTS_ROLES = {
+        "roles": [
+            {
+                "id": "c1648e",
+                "links": {
+                    "self": "http://example.com/identity/v3/roles/c1648e"
+                },
+                "name": "manager"
+            },
+            {
+                "id": "ed7b78",
+                "links": {
+                    "self": "http://example.com/identity/v3/roles/ed7b78"
+                },
+                "name": "member"
+            }
+        ]
+    }
+
+    FAKE_TRUST_ROLE = {
+        "role": {
+            "id": "c1648e",
+            "links": {
+                "self": "http://example.com/identity/v3/roles/c1648e"
+            },
+            "name": "manager"
+        }
+    }
+
     def setUp(self):
         super(TestTrustsClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -123,6 +152,43 @@
             self.FAKE_LIST_TRUSTS,
             bytes_body)
 
+    def _test_list_trust_roles(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_trust_roles,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_TRUSTS_ROLES,
+            bytes_body,
+            trust_id="1ff900")
+
+    def test_check_trust_role(self):
+        self.check_service_client_function(
+            self.client.check_trust_role,
+            'tempest.lib.common.rest_client.RestClient.head',
+            {},
+            trust_id="1ff900",
+            role_id="ed7b78")
+
+    def _check_show_trust_role(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_trust_role,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_TRUST_ROLE,
+            bytes_body,
+            trust_id="1ff900",
+            role_id="ed7b78")
+
+    def test_list_trust_roles_with_str_body(self):
+        self._test_list_trust_roles()
+
+    def test_list_trust_roles_with_bytes_body(self):
+        self._test_list_trust_roles(bytes_body=True)
+
+    def test_check_show_trust_role_with_str_body(self):
+        self._check_show_trust_role()
+
+    def test_check_show_trust_role_with_bytes_body(self):
+        self._check_show_trust_role(bytes_body=True)
+
     def test_create_trust_with_str_body(self):
         self._test_create_trust()
 
diff --git a/tempest/tests/lib/services/identity/v3/test_users_client.py b/tempest/tests/lib/services/identity/v3/test_users_client.py
index c0dfdae..7be0480 100644
--- a/tempest/tests/lib/services/identity/v3/test_users_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_users_client.py
@@ -141,6 +141,35 @@
         ]
     }
 
+    FAKE_USER_EC2_CREDENTIAL_INFO = {
+        "credential": {
+            'user_id': '9beb0e12f3e5416db8d7cccfc785db3b',
+            'access': '79abf59acc77492a86170cbe2f1feafa',
+            'secret': 'c4e7d3a691fd4563873d381a40320f46',
+            'trust_id': None,
+            'tenant_id': '596557269d7b4dd78631a602eb9f151d'
+        }
+    }
+
+    FAKE_LIST_USER_EC2_CREDENTIALS = {
+        "credentials": [
+            {
+                'user_id': '9beb0e12f3e5416db8d7cccfc785db3b',
+                'access': '79abf59acc77492a86170cbe2f1feafa',
+                'secret': 'c4e7d3a691fd4563873d381a40320f46',
+                'trust_id': None,
+                'tenant_id': '596557269d7b4dd78631a602eb9f151d'
+            },
+            {
+                'user_id': '3beb0e12f3e5416db8d7cccfc785de4r',
+                'access': '45abf59acc77492a86170cbe2f1fesde',
+                'secret': 'g4e7d3a691fd4563873d381a40320e45',
+                'trust_id': None,
+                'tenant_id': '123557269d7b4dd78631a602eb9f112f'
+            }
+        ]
+    }
+
     def setUp(self):
         super(TestUsersClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -201,6 +230,33 @@
             user_id='817fb3c23fd7465ba6d7fe1b1320121d',
         )
 
+    def _test_create_user_ec2_credential(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.create_user_ec2_credential,
+            'tempest.lib.common.rest_client.RestClient.post',
+            self.FAKE_USER_EC2_CREDENTIAL_INFO,
+            bytes_body,
+            status=201,
+            user_id="1",
+            tenant_id="123")
+
+    def _test_show_user_ec2_credential(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_user_ec2_credential,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_USER_EC2_CREDENTIAL_INFO,
+            bytes_body,
+            user_id="1",
+            access="123")
+
+    def _test_list_user_ec2_credentials(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_user_ec2_credentials,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_LIST_USER_EC2_CREDENTIALS,
+            bytes_body,
+            user_id="1")
+
     def test_create_user_with_string_body(self):
         self._test_create_user()
 
@@ -255,3 +311,30 @@
             user_id='817fb3c23fd7465ba6d7fe1b1320121d',
             password='NewTempestPassword',
             original_password='OldTempestPassword')
+
+    def test_create_user_ec2_credential_with_str_body(self):
+        self._test_create_user_ec2_credential()
+
+    def test_create_user_ec2_credential_with_bytes_body(self):
+        self._test_create_user_ec2_credential(bytes_body=True)
+
+    def test_show_user_ec2_credential_with_str_body(self):
+        self._test_show_user_ec2_credential()
+
+    def test_show_user_ec2_credential_with_bytes_body(self):
+        self._test_show_user_ec2_credential(bytes_body=True)
+
+    def test_list_user_ec2_credentials_with_str_body(self):
+        self._test_list_user_ec2_credentials()
+
+    def test_list_user_ec2_credentials_with_bytes_body(self):
+        self._test_list_user_ec2_credentials(bytes_body=True)
+
+    def test_delete_user_ec2_credential(self):
+        self.check_service_client_function(
+            self.client.delete_user_ec2_credential,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            user_id="123",
+            access="1234",
+            status=204)
diff --git a/tempest/tests/lib/services/image/v2/test_images_client.py b/tempest/tests/lib/services/image/v2/test_images_client.py
index fe671bd..5b162f8 100644
--- a/tempest/tests/lib/services/image/v2/test_images_client.py
+++ b/tempest/tests/lib/services/image/v2/test_images_client.py
@@ -12,7 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
+import io
 
 from tempest.lib.common.utils import data_utils
 from tempest.lib.services.image.v2 import images_client
@@ -105,6 +105,44 @@
         "first": "/v2/images"
     }
 
+    FAKE_SHOW_IMAGE_TASKS = {
+        "tasks": [
+            {
+                "id": "ee22890e-8948-4ea6-9668-831f973c84f5",
+                "image_id": "dddddddd-dddd-dddd-dddd-dddddddddddd",
+                "request-id": "rrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr",
+                "user": "uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu",
+                "type": "api_image_import",
+                "status": "processing",
+                "owner": "64f0efc9955145aeb06f297a8a6fe402",
+                "expires_at": None,
+                "created_at": "2020-12-18T05:20:38.000000",
+                "updated_at": "2020-12-18T05:25:39.000000",
+                "deleted_at": None,
+                "deleted": False,
+                "input": {
+                    "image_id": "829c729b-ebc4-4cc7-a164-6f43f1149b17",
+                    "import_req": {
+                        "method": {
+                            "name": "copy-image",
+                        },
+                        "all_stores": True,
+                        "all_stores_must_succeed": False,
+                    },
+                    "backend": [
+                        "fast",
+                        "cheap",
+                        "slow",
+                        "reliable",
+                        "common",
+                    ]
+                },
+                "result": None,
+                "message": "Copied 15 MiB",
+            }
+        ]
+    }
+
     FAKE_TAG_NAME = "fake tag"
 
     def setUp(self):
@@ -178,7 +216,7 @@
             {}, image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8", status=204)
 
     def test_store_image_file(self):
-        data = six.BytesIO(data_utils.random_bytes())
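+        # io.BytesIO is the stdlib replacement for the removed six.BytesIO
+        # compatibility shim.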
+        data = io.BytesIO(data_utils.random_bytes())
 
         self.check_service_client_function(
             self.client.store_image_file,
@@ -230,3 +268,11 @@
 
     def test_list_images_with_bytes_body(self):
         self._test_list_images(bytes_body=True)
+
+    def test_show_image_tasks(self):
+        self.check_service_client_function(
+            self.client.show_image_tasks,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_IMAGE_TASKS,
+            True,
+            image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8")
diff --git a/tempest/tests/lib/services/image/v2/test_versions_client.py b/tempest/tests/lib/services/image/v2/test_versions_client.py
index 6234b06..98c558a 100644
--- a/tempest/tests/lib/services/image/v2/test_versions_client.py
+++ b/tempest/tests/lib/services/image/v2/test_versions_client.py
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import fixtures
+
 from tempest.lib.services.image.v2 import versions_client
 from tempest.tests.lib import fake_auth_provider
 from tempest.tests.lib.services import base
@@ -92,3 +94,13 @@
 
     def test_list_versions_with_bytes_body(self):
         self._test_list_versions(bytes_body=True)
+
+    def test_has_version(self):
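+        # Version discovery returns the versions document with a
+        # 300 (Multiple Choices) status, so mock raw_request to reply
+        # with the fake payload and that status code.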
+        mocked_r = self.create_response(self.FAKE_VERSIONS_INFO, False,
+                                        300, None)
+        self.useFixture(fixtures.MockPatch(
+            'tempest.lib.common.rest_client.RestClient.raw_request',
+            return_value=mocked_r))
+
+        self.assertTrue(self.client.has_version('2.1'))
+        self.assertFalse(self.client.has_version('9.9'))
diff --git a/tempest/tests/lib/services/network/test_floating_ips_client.py b/tempest/tests/lib/services/network/test_floating_ips_client.py
index c5b1845..e8f2e5a 100644
--- a/tempest/tests/lib/services/network/test_floating_ips_client.py
+++ b/tempest/tests/lib/services/network/test_floating_ips_client.py
@@ -27,6 +27,8 @@
             {
                 "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
                 "description": "for test",
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myfip",
                 "created_at": "2016-12-21T10:55:50Z",
                 "updated_at": "2016-12-21T10:55:53Z",
                 "revision_number": 1,
@@ -37,11 +39,24 @@
                 "floating_ip_address": "172.24.4.228",
                 "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
                 "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
-                "status": "ACTIVE"
+                "status": "ACTIVE",
+                "port_details": {
+                    "status": "ACTIVE",
+                    "name": "",
+                    "admin_state_up": True,
+                    "network_id": "02dd8479-ef26-4398-a102-d19d0a7b3a1f",
+                    "device_owner": "compute:nova",
+                    "mac_address": "fa:16:3e:b1:3b:30",
+                    "device_id": "8e3941b4-a6e9-499f-a1ac-2a4662025cba"
+                },
+                "tags": ["tag1,tag2"],
+                "port_forwardings": []
             },
             {
                 "router_id": None,
                 "description": "for test",
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myfip2",
                 "created_at": "2016-12-21T11:55:50Z",
                 "updated_at": "2016-12-21T11:55:53Z",
                 "revision_number": 2,
@@ -52,7 +67,10 @@
                 "floating_ip_address": "172.24.4.227",
                 "port_id": None,
                 "id": "61cea855-49cb-4846-997d-801b70c71bdd",
-                "status": "DOWN"
+                "status": "DOWN",
+                "port_details": None,
+                "tags": ["tag1,tag2"],
+                "port_forwardings": []
             }
         ]
     }
diff --git a/tempest/tests/lib/services/network/test_networks_client.py b/tempest/tests/lib/services/network/test_networks_client.py
index 078f4b0..17233bc 100644
--- a/tempest/tests/lib/services/network/test_networks_client.py
+++ b/tempest/tests/lib/services/network/test_networks_client.py
@@ -31,12 +31,17 @@
                     "nova"
                 ],
                 "created_at": "2016-03-08T20:19:41",
+                "dns_domain": "my-domain.org.",
                 "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+                "ipv4_address_scope": None,
+                "ipv6_address_scope": None,
+                "l2_adjacency": False,
                 "mtu": 0,
                 "name": "net1",
                 "port_security_enabled": True,
                 "project_id": "4fd44f30292945e481c7b8a0c8908869",
                 "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+                "revision_number": 1,
                 "router:external": False,
                 "shared": False,
                 "status": "ACTIVE",
@@ -46,7 +51,8 @@
                 "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
                 "updated_at": "2016-03-08T20:19:41",
                 "vlan_transparent": True,
-                "description": ""
+                "description": "",
+                "is_default": False
             },
             {
                 "admin_state_up": True,
@@ -54,12 +60,18 @@
                 "availability_zones": [
                     "nova"
                 ],
+                "created_at": "2016-03-08T20:19:41",
+                "dns_domain": "my-domain.org.",
                 "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+                "ipv4_address_scope": None,
+                "ipv6_address_scope": None,
+                "l2_adjacency": False,
                 "mtu": 0,
                 "name": "net2",
                 "port_security_enabled": True,
                 "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                 "qos_policy_id": "bfdb6c39f71e4d44b1dfbda245c50819",
+                "revision_number": 3,
                 "router:external": False,
                 "shared": False,
                 "status": "ACTIVE",
@@ -69,7 +81,8 @@
                 "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                 "updated_at": "2016-03-08T20:19:41",
                 "vlan_transparent": False,
-                "description": ""
+                "description": "",
+                "is_default": False
             }
         ]
     }
@@ -108,6 +121,7 @@
                 "alive": True,
                 "topic": "dhcp_agent",
                 "host": "osboxes",
+                "ha_state": None,
                 "agent_type": "DHCP agent",
                 "resource_versions": {},
                 "created_at": "2017-06-19 21:39:51",
diff --git a/tempest/tests/lib/services/network/test_ports_client.py b/tempest/tests/lib/services/network/test_ports_client.py
index 20ef3f1..9ca9ac6 100644
--- a/tempest/tests/lib/services/network/test_ports_client.py
+++ b/tempest/tests/lib/services/network/test_ports_client.py
@@ -22,53 +22,126 @@
 
 class TestPortsClient(base.BaseServiceTest):
 
+    FAKE_CREATE_PORTS = {
+        "port": {
+            "binding:host_id": "4df8d9ff-6f6f-438f-90a1-ef660d4586ad",
+            "binding:profile": {
+                "local_link_information": [
+                    {
+                        "port_id": "Ethernet3/1",
+                        "switch_id": "0a:1b:2c:3d:4e:5f",
+                        "switch_info": "switch1"
+                    }
+                ]
+            },
+            "binding:vnic_type": "baremetal",
+            "device_id": "d90a13da-be41-461f-9f99-1dbcf438fdf2",
+            "device_owner": "baremetal:none",
+            "dns_domain": "my-domain.org.",
+            "dns_name": "myport",
+            "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+            "uplink_status_propagation": False
+        }
+    }
+
     FAKE_PORTS = {
         "ports": [
             {
                 "admin_state_up": True,
                 "allowed_address_pairs": [],
+                "created_at": "2016-03-08T20:19:41",
                 "data_plane_status": None,
                 "description": "",
                 "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
                 "device_owner": "network:router_gateway",
-                "extra_dhcp_opts": [],
+                "dns_assignment": [
+                    {
+                        "hostname": "myport",
+                        "ip_address": "172.24.4.2",
+                        "fqdn": "myport.my-domain.org"
+                    }
+                ],
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myport",
+                "extra_dhcp_opts": [
+                    {
+                        "opt_value": "pxelinux.0",
+                        "ip_version": 4,
+                        "opt_name": "bootfile-name"
+                    }
+                ],
                 "fixed_ips": [
                     {
                         "ip_address": "172.24.4.2",
-                        "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062"
+                        "subnet_id":
+                            "008ba151-0b8c-4a67-98b5-0d2b87666062"
                     }
                 ],
                 "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b",
+                "ip_allocation": "immediate",
                 "mac_address": "fa:16:3e:58:42:ed",
                 "name": "",
                 "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3",
                 "project_id": "",
+                "revision_number": 1,
                 "security_groups": [],
                 "status": "ACTIVE",
-                "tenant_id": ""
+                "tags": ["tag1,tag2"],
+                "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa",
+                "updated_at": "2016-03-08T20:19:41",
+                "qos_network_policy_id":
+                    "174dd0c1-a4eb-49d4-a807-ae80246d82f4",
+                "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+                "port_security_enabled": False,
+                "uplink_status_propagation": False
             },
             {
                 "admin_state_up": True,
                 "allowed_address_pairs": [],
+                "created_at": "2016-03-08T20:19:41",
                 "data_plane_status": None,
                 "description": "",
                 "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
                 "device_owner": "network:router_interface",
-                "extra_dhcp_opts": [],
+                "dns_assignment": [
+                    {
+                        "hostname": "myport2",
+                        "ip_address": "10.0.0.1",
+                        "fqdn": "myport2.my-domain.org"
+                    }
+                ],
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myport2",
+                "extra_dhcp_opts": [
+                    {
+                        "opt_value": "pxelinux.0",
+                        "ip_version": 4,
+                        "opt_name": "bootfile-name"
+                    }
+                ],
                 "fixed_ips": [
                     {
                         "ip_address": "10.0.0.1",
-                        "subnet_id": "288bf4a1-51ba-43b6-9d0a-520e9005db17"
+                        "subnet_id":
+                            "288bf4a1-51ba-43b6-9d0a-520e9005db17"
                     }
                 ],
                 "id": "f71a6703-d6de-4be1-a91a-a570ede1d159",
+                "ip_allocation": "immediate",
                 "mac_address": "fa:16:3e:bb:3c:e4",
                 "name": "",
                 "network_id": "f27aa545-cbdd-4907-b0c6-c9e8b039dcc2",
                 "project_id": "d397de8a63f341818f198abb0966f6f3",
+                "revision_number": 1,
                 "security_groups": [],
                 "status": "ACTIVE",
-                "tenant_id": "d397de8a63f341818f198abb0966f6f3"
+                "tags": ["tag1,tag2"],
+                "tenant_id": "d397de8a63f341818f198abb0966f6f3",
+                "updated_at": "2016-03-08T20:19:41",
+                "qos_network_policy_id": None,
+                "qos_policy_id": None,
+                "port_security_enabled": False,
+                "uplink_status_propagation": False
             }
         ]
     }
@@ -112,7 +185,7 @@
         self.check_service_client_function(
             self.ports_client.create_port,
             "tempest.lib.common.rest_client.RestClient.post",
-            {"port": self.FAKE_PORTS["ports"][0]},
+            self.FAKE_CREATE_PORTS,
             bytes_body,
             201,
             **self.FAKE_PORT1)
diff --git a/tempest/tests/lib/services/network/test_trunks_client.py b/tempest/tests/lib/services/network/test_trunks_client.py
new file mode 100644
index 0000000..b637d5e
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_trunks_client.py
@@ -0,0 +1,201 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.services.network import trunks_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestTrunksClient(base.BaseServiceTest):
+
+    FAKE_TRUNK_ID = "dfbc2103-93cf-4edf-952a-ef6deb32ddc6"
+    FAKE_PORT_ID = "1f04eb36-6c84-11eb-b0ab-4fc62961629d"
+    FAKE_TRUNKS = {
+        "trunks": [
+            {
+                "admin_state_up": True,
+                "description": "",
+                "id": "dfbc2103-93cf-4edf-952a-ef6deb32ddc6",
+                "name": "trunk0",
+                "port_id": "00130aab-bb51-42a1-a7c4-6703a3a43aa5",
+                "project_id": "",
+                "revision_number": 2,
+                "status": "DOWN",
+                "sub_ports": [
+                    {
+                        "port_id": "87d2483d-e5e6-483d-b5f0-81b9ed1d1a91",
+                        "segmentation_id": 101,
+                        "segmentation_type": "vlan"
+                    }
+                ],
+                "tags": [],
+            },
+            {
+                "admin_state_up": True,
+                "description": "",
+                "id": "9eb0e72e-11d3-4295-bcaf-6c89008d9f0a",
+                "name": "trunk1",
+                "port_id": "035a12bf-2ae3-42ae-8ad6-9f70640cddde",
+                "project_id": "",
+                "revision_number": 2,
+                "status": "DOWN",
+                "sub_ports": [
+                    {
+                        "port_id": "cba839d5-02e2-4e09-b964-81356da78165",
+                        "segmentation_id": 102,
+                        "segmentation_type": "vlan"
+                    }
+                ],
+                "tags": [],
+            },
+        ]
+    }
+
+    FAKE_TRUNK_1 = {
+        "name": "trunk0",
+        "port_id": "00130aab-bb51-42a1-a7c4-6703a3a43aa5"
+    }
+
+    def setUp(self):
+        super(TestTrunksClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.trunks_client = trunks_client.TrunksClient(
+            fake_auth, "network", "regionOne")
+
+    def _test_create_trunk(self, bytes_body=False):
+        self.check_service_client_function(
+            self.trunks_client.create_trunk,
+            "tempest.lib.common.rest_client.RestClient.post",
+            {"trunk": self.FAKE_TRUNKS["trunks"][0]},
+            bytes_body,
+            201,
+            **self.FAKE_TRUNK_1)
+
+    def _test_list_trunks(self, bytes_body=False):
+        self.check_service_client_function(
+            self.trunks_client.list_trunks,
+            "tempest.lib.common.rest_client.RestClient.get",
+            self.FAKE_TRUNKS,
+            bytes_body,
+            200)
+
+    def _test_show_trunk(self, bytes_body=False):
+        self.check_service_client_function(
+            self.trunks_client.show_trunk,
+            "tempest.lib.common.rest_client.RestClient.get",
+            {"trunk": self.FAKE_TRUNKS["trunks"][0]},
+            bytes_body,
+            200,
+            trunk_id=self.FAKE_TRUNK_ID)
+
+    def _test_update_trunk(self, bytes_body=False):
+        update_kwargs = {
+            "admin_state_up": True,
+            "name": "new_trunk"
+        }
+
+        resp_body = {
+            "trunk": copy.deepcopy(
+                self.FAKE_TRUNKS["trunks"][0]
+            )
+        }
+        resp_body["trunk"].update(update_kwargs)
+
+        self.check_service_client_function(
+            self.trunks_client.update_trunk,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            trunk_id=self.FAKE_TRUNK_ID,
+            **update_kwargs)
+
+    def _test_add_subports_to_trunk(self, bytes_body=False):
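+        # The mocked PUT response mirrors the trunk with the new subports
+        # added to its existing 'sub_ports' list.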
+        sub_ports = [{
+            "port_id": "f04eb36-6c84-11eb-b0ab-4fc62961629d",
+            "segmentation_type": "vlan",
+            "segmentation_id": "1001"
+        }]
+        resp_body = copy.deepcopy(self.FAKE_TRUNKS["trunks"][0])
+
+        resp_body["sub_ports"].append(sub_ports)
+        self.check_service_client_function(
+            self.trunks_client.add_subports_to_trunk,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            trunk_id=self.FAKE_TRUNK_ID,
+            sub_ports=sub_ports)
+
+    def _test_delete_subports_from_trunk(self, bytes_body=False):
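+        # Removing the trunk's only subport leaves 'sub_ports' empty in the
+        # mocked response.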
+        fake_sub_ports = self.FAKE_TRUNKS['trunks'][0]['sub_ports']
+        sub_ports = [
+            {"port_id": fake_sub_ports[0]['port_id']}
+        ]
+        resp_body = copy.deepcopy(self.FAKE_TRUNKS["trunks"][0])
+
+        resp_body['sub_ports'] = []
+        self.check_service_client_function(
+            self.trunks_client.delete_subports_from_trunk,
+            "tempest.lib.common.rest_client.RestClient.put",
+            resp_body,
+            bytes_body,
+            200,
+            trunk_id=self.FAKE_TRUNK_ID,
+            sub_ports=sub_ports)
+
+    def test_create_trunk_with_str_body(self):
+        self._test_create_trunk()
+
+    def test_create_trunk_with_bytes_body(self):
+        self._test_create_trunk(bytes_body=True)
+
+    def test_list_trunks_with_str_body(self):
+        self._test_list_trunks()
+
+    def test_list_trunks_with_bytes_body(self):
+        self._test_list_trunks(bytes_body=True)
+
+    def test_show_trunk_with_str_body(self):
+        self._test_show_trunk()
+
+    def test_show_trunk_with_bytes_body(self):
+        self._test_show_trunk(bytes_body=True)
+
+    def test_update_trunk_with_str_body(self):
+        self._test_update_trunk()
+
+    def test_update_trunk_with_bytes_body(self):
+        self._test_update_trunk(bytes_body=True)
+
+    def test_add_subports_to_trunk_str_body(self):
+        self._test_add_subports_to_trunk()
+
+    def test_add_subports_to_trunk_bytes_body(self):
+        self._test_add_subports_to_trunk(bytes_body=True)
+
+    def test_delete_subports_from_trunk_str_body(self):
+        self._test_delete_subports_from_trunk()
+
+    def test_delete_subports_from_trunk_bytes_body(self):
+        self._test_delete_subports_from_trunk(bytes_body=True)
+
+    def test_delete_trunk(self):
+        self.check_service_client_function(
+            self.trunks_client.delete_trunk,
+            "tempest.lib.common.rest_client.RestClient.delete",
+            {},
+            status=204,
+            trunk_id=self.FAKE_TRUNK_ID)
diff --git a/tempest/tests/lib/services/object_storage/test_object_client.py b/tempest/tests/lib/services/object_storage/test_object_client.py
index c646d61..d6df243 100644
--- a/tempest/tests/lib/services/object_storage/test_object_client.py
+++ b/tempest/tests/lib/services/object_storage/test_object_client.py
@@ -31,15 +31,18 @@
         self.object_client = object_client.ObjectClient(self.fake_auth,
                                                         'swift', 'region1')
 
-    @mock.patch.object(object_client, '_create_connection')
+    @mock.patch('tempest.lib.services.object_storage.object_client.'
+                'ObjectClient._create_connection')
     def test_create_object_continue_no_data(self, mock_poc):
         self._validate_create_object_continue(None, mock_poc)
 
-    @mock.patch.object(object_client, '_create_connection')
+    @mock.patch('tempest.lib.services.object_storage.object_client.'
+                'ObjectClient._create_connection')
     def test_create_object_continue_with_data(self, mock_poc):
         self._validate_create_object_continue('hello', mock_poc)
 
-    @mock.patch.object(object_client, '_create_connection')
+    @mock.patch('tempest.lib.services.object_storage.object_client.'
+                'ObjectClient._create_connection')
     def test_create_continue_with_no_continue_received(self, mock_poc):
         self._validate_create_object_continue('hello', mock_poc,
                                               initial_status=201)
diff --git a/tempest/tests/lib/services/placement/test_resource_providers_client.py b/tempest/tests/lib/services/placement/test_resource_providers_client.py
new file mode 100644
index 0000000..2871395
--- /dev/null
+++ b/tempest/tests/lib/services/placement/test_resource_providers_client.py
@@ -0,0 +1,206 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.placement import resource_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestResourceProvidersClient(base.BaseServiceTest):
+    FAKE_RESOURCE_PROVIDER_UUID = '3722a86e-a563-11e9-9abb-c3d41b6d3abf'
+    FAKE_ROOT_PROVIDER_UUID = '4a6a57c8-a563-11e9-914e-f3e0478fce53'
+    FAKE_RESOURCE_PROVIDER = {
+        'generation': 0,
+        'name': 'Ceph Storage Pool',
+        'uuid': FAKE_RESOURCE_PROVIDER_UUID,
+        'parent_provider_uuid': FAKE_ROOT_PROVIDER_UUID,
+        'root_provider_uuid': FAKE_ROOT_PROVIDER_UUID
+    }
+
+    FAKE_RESOURCE_PROVIDERS = {
+        'resource_providers': [FAKE_RESOURCE_PROVIDER]
+    }
+
+    FAKE_RESOURCE_PROVIDER_INVENTORIES = {
+        'inventories': {
+            'DISK_GB': {
+                'allocation_ratio': 1.0,
+                'max_unit': 35,
+                'min_unit': 1,
+                'reserved': 0,
+                'step_size': 1,
+                'total': 35
+            }
+        },
+        'resource_provider_generation': 7
+    }
+
+    FAKE_AGGREGATE_UUID = '1166be40-a567-11e9-9f2a-53827f9311fa'
+    FAKE_RESOURCE_PROVIDER_AGGREGATES = {
+        'aggregates': [FAKE_AGGREGATE_UUID]
+    }
+    FAKE_RESOURCE_UPDATE_INVENTORIES_RESPONSE = {
+        "inventories": {
+            "MEMORY_MB": {
+                "allocation_ratio": 2.0,
+                "max_unit": 16,
+                "min_unit": 1,
+                "reserved": 0,
+                "step_size": 4,
+                "total": 128
+            },
+            "VCPU": {
+                "allocation_ratio": 10.0,
+                "max_unit": 2147483647,
+                "min_unit": 1,
+                "reserved": 2,
+                "step_size": 1,
+                "total": 64
+            }
+        },
+        "resource_provider_generation": 2
+    }
+    FAKE_RESOURCE_UPDATE_INVENTORIES_REQUEST = {
+        "inventories": {
+            "MEMORY_MB": {
+                "allocation_ratio": 2.0,
+                "max_unit": 16,
+                "step_size": 4,
+                "total": 128
+            },
+            "VCPU": {
+                "allocation_ratio": 10.0,
+                "reserved": 2,
+                "total": 64
+            }
+        },
+        "resource_provider_generation": 1
+    }
+    FAKE_RESOURCE_PROVIDER_USAGES = {
+        "usages": {
+            "VCPU": 2,
+            "MEMORY_MB": 1024,
+            "DISK_GB": 10
+        },
+        "resource_provider_generation": 3
+    }
+
+    def setUp(self):
+        super(TestResourceProvidersClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = resource_providers_client.ResourceProvidersClient(
+            fake_auth, 'placement', 'regionOne')
+
+    def _test_list_resource_providers(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_providers,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDERS,
+            to_utf=bytes_body,
+            status=200
+        )
+
+    def test_list_resource_providers_with_str_body(self):
+        self._test_list_resource_providers()
+
+    def test_list_resource_providers_with_bytes_body(self):
+        self._test_list_resource_providers(bytes_body=True)
+
+    def _test_show_resource_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_resource_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_show_resource_provider_with_str_body(self):
+        self._test_show_resource_provider()
+
+    def test_show_resource_provider_with_bytes_body(self):
+        self._test_show_resource_provider(bytes_body=True)
+
+    def _test_list_resource_provider_inventories(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_provider_inventories,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER_INVENTORIES,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_list_resource_provider_inventories_with_str_body(self):
+        self._test_list_resource_provider_inventories()
+
+    def test_list_resource_provider_inventories_with_bytes_body(self):
+        self._test_list_resource_provider_inventories(bytes_body=True)
+
+    def _test_update_resource_providers_inventories(self, bytes_body=False):
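+        # The request carries the provider's current generation (1) and the
+        # mocked response returns the bumped generation (2), matching the
+        # fixtures above.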
+        self.check_service_client_function(
+            self.client.update_resource_providers_inventories,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_RESOURCE_UPDATE_INVENTORIES_RESPONSE,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
+            **self.FAKE_RESOURCE_UPDATE_INVENTORIES_REQUEST
+        )
+
+    def test_update_resource_providers_inventories_with_str_body(self):
+        self._test_update_resource_providers_inventories()
+
+    def test_update_resource_providers_inventories_with_bytes_body(self):
+        self._test_update_resource_providers_inventories(bytes_body=True)
+
+    def test_delete_resource_providers_inventories(self):
+        self.check_service_client_function(
+            self.client.delete_resource_providers_inventories,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            {},
+            status=204,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
+        )
+
+    def _test_list_resource_provider_aggregates(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_provider_aggregates,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER_AGGREGATES,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_list_resource_provider_aggregates_with_str_body(self):
+        self._test_list_resource_provider_aggregates()
+
+    def test_list_resource_provider_aggregates_with_bytes_body(self):
+        self._test_list_resource_provider_aggregates(bytes_body=True)
+
+    def _test_list_resource_provider_usages(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_provider_usages,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER_USAGES,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_list_resource_provider_usages_with_str_body(self):
+        self._test_list_resource_provider_usages()
+
+    def test_list_resource_provider_usages_with_bytes_body(self):
+        self._test_list_resource_provider_usages(bytes_body=True)
diff --git a/tempest/tests/lib/services/registry_fixture.py b/tempest/tests/lib/services/registry_fixture.py
index 07af68a..a368705 100644
--- a/tempest/tests/lib/services/registry_fixture.py
+++ b/tempest/tests/lib/services/registry_fixture.py
@@ -38,8 +38,7 @@
         """Initialise the registry fixture"""
         self.services = set(['compute', 'identity.v2', 'identity.v3',
                              'image.v1', 'image.v2', 'network', 'placement',
-                             'volume.v1', 'volume.v2', 'volume.v3',
-                             'object-storage'])
+                             'volume.v2', 'volume.v3', 'object-storage'])
 
     def _setUp(self):
         # Cleanup the registry
diff --git a/tempest/tests/lib/services/test_clients.py b/tempest/tests/lib/services/test_clients.py
index f83064a..6c79db6 100644
--- a/tempest/tests/lib/services/test_clients.py
+++ b/tempest/tests/lib/services/test_clients.py
@@ -16,7 +16,6 @@
 from unittest import mock
 
 import fixtures
-import six
 import testtools
 
 from tempest.lib import auth
@@ -270,8 +269,7 @@
                           'module_path': 'This neither',
                           'client_names': ['SomeClient1']}]}
         msg = "(?=.*{0})(?=.*{1})".format(
-            *[x[1][0]['module_path'] for x in six.iteritems(
-                fake_service_clients)])
+            *[x[1][0]['module_path'] for x in fake_service_clients.items()])
         self.useFixture(fixtures.MockPatchObject(
             clients.ClientsRegistry(), 'get_service_clients',
             return_value=fake_service_clients))
@@ -300,8 +298,8 @@
                           'module_path': 'fake_path_2',
                           'client_names': ['SomeClient2']}]}
         msg = "(?=.*{0})(?=.*{1})".format(
-            *[x[1][0]['service_version'] for x in six.iteritems(
-                fake_service_clients)])
+            *[x[1][0]['service_version'] for x in
+                fake_service_clients.items()])
         self.useFixture(fixtures.MockPatchObject(
             clients.ClientsRegistry(), 'get_service_clients',
             return_value=fake_service_clients))
diff --git a/tempest/tests/lib/services/volume/v1/test_encryption_types_client.py b/tempest/tests/lib/services/volume/v1/test_encryption_types_client.py
deleted file mode 100644
index 585904e..0000000
--- a/tempest/tests/lib/services/volume/v1/test_encryption_types_client.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2016 NEC Corporation.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.lib.services.volume.v1 import encryption_types_client
-from tempest.tests.lib import fake_auth_provider
-from tempest.tests.lib.services import base
-
-
-class TestEncryptionTypesClient(base.BaseServiceTest):
-    FAKE_CREATE_ENCRYPTION_TYPE = {
-        "encryption": {
-            "id": "cbc36478b0bd8e67e89",
-            "name": "FakeEncryptionType",
-            "type": "fakeType",
-            "provider": "LuksEncryptor",
-            "cipher": "aes-xts-plain64",
-            "key_size": "512",
-            "control_location": "front-end"
-        }
-    }
-
-    FAKE_INFO_ENCRYPTION_TYPE = {
-        "encryption": {
-            "name": "FakeEncryptionType",
-            "type": "fakeType",
-            "description": "test_description",
-            "volume_type": "fakeType",
-            "provider": "LuksEncryptor",
-            "cipher": "aes-xts-plain64",
-            "key_size": "512",
-            "control_location": "front-end"
-        }
-    }
-
-    def setUp(self):
-        super(TestEncryptionTypesClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = encryption_types_client.EncryptionTypesClient(fake_auth,
-                                                                    'volume',
-                                                                    'regionOne'
-                                                                    )
-
-    def _test_create_encryption(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.create_encryption_type,
-            'tempest.lib.common.rest_client.RestClient.post',
-            self.FAKE_CREATE_ENCRYPTION_TYPE,
-            bytes_body, volume_type_id="cbc36478b0bd8e67e89")
-
-    def _test_show_encryption_type(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_encryption_type,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_INFO_ENCRYPTION_TYPE,
-            bytes_body, volume_type_id="cbc36478b0bd8e67e89")
-
-    def test_create_encryption_type_with_str_body(self):
-        self._test_create_encryption()
-
-    def test_create_encryption_type_with_bytes_body(self):
-        self._test_create_encryption(bytes_body=True)
-
-    def test_show_encryption_type_with_str_body(self):
-        self._test_show_encryption_type()
-
-    def test_show_encryption_type_with_bytes_body(self):
-        self._test_show_encryption_type(bytes_body=True)
-
-    def test_delete_encryption_type(self):
-        self.check_service_client_function(
-            self.client.delete_encryption_type,
-            'tempest.lib.common.rest_client.RestClient.delete',
-            {},
-            volume_type_id="cbc36478b0bd8e67e89",
-            status=202)
diff --git a/tempest/tests/lib/services/volume/v1/test_quotas_client.py b/tempest/tests/lib/services/volume/v1/test_quotas_client.py
deleted file mode 100644
index f9e76af..0000000
--- a/tempest/tests/lib/services/volume/v1/test_quotas_client.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2016 NEC Corporation.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.lib.services.volume.v1 import quotas_client
-from tempest.tests.lib import fake_auth_provider
-from tempest.tests.lib.services import base
-
-
-class TestQuotasClient(base.BaseServiceTest):
-    FAKE_QUOTAS = {
-        "quota_set": {
-            "cores": 20,
-            "fixed_ips": -1,
-            "floating_ips": 10,
-            "id": "fake_tenant",
-            "injected_file_content_bytes": 10240,
-            "injected_file_path_bytes": 255,
-            "injected_files": 5,
-            "instances": 10,
-            "key_pairs": 100,
-            "metadata_items": 128,
-            "ram": 51200,
-            "security_group_rules": 20,
-            "security_groups": 10
-        }
-    }
-
-    FAKE_UPDATE_QUOTAS_REQUEST = {
-        "quota_set": {
-            "security_groups": 45
-        }
-    }
-
-    def setUp(self):
-        super(TestQuotasClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = quotas_client.QuotasClient(fake_auth,
-                                                 'volume',
-                                                 'regionOne')
-
-    def _test_show_default_quota_set(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_default_quota_set,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_QUOTAS,
-            bytes_body, tenant_id="fake_tenant")
-
-    def _test_show_quota_set(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_quota_set,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_QUOTAS,
-            bytes_body, tenant_id="fake_tenant")
-
-    def _test_update_quota_set(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.update_quota_set,
-            'tempest.lib.common.rest_client.RestClient.put',
-            self.FAKE_UPDATE_QUOTAS_REQUEST,
-            bytes_body, tenant_id="fake_tenant")
-
-    def test_show_default_quota_set_with_str_body(self):
-        self._test_show_default_quota_set()
-
-    def test_show_default_quota_set_with_bytes_body(self):
-        self._test_show_default_quota_set(bytes_body=True)
-
-    def test_show_quota_set_with_str_body(self):
-        self._test_show_quota_set()
-
-    def test_show_quota_set_with_bytes_body(self):
-        self._test_show_quota_set(bytes_body=True)
-
-    def test_update_quota_set_with_str_body(self):
-        self._test_update_quota_set()
-
-    def test_update_quota_set_with_bytes_body(self):
-        self._test_update_quota_set(bytes_body=True)
-
-    def test_delete_quota_set(self):
-        self.check_service_client_function(
-            self.client.delete_quota_set,
-            'tempest.lib.common.rest_client.RestClient.delete',
-            {},
-            tenant_id="fake_tenant")
diff --git a/tempest/tests/lib/services/volume/v1/test_snapshots_client.py b/tempest/tests/lib/services/volume/v1/test_snapshots_client.py
deleted file mode 100644
index 49191e3..0000000
--- a/tempest/tests/lib/services/volume/v1/test_snapshots_client.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2016 NEC Corporation.  All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.lib.services.volume.v1 import snapshots_client
-from tempest.tests.lib import fake_auth_provider
-from tempest.tests.lib.services import base
-
-
-class TestSnapshotsClient(base.BaseServiceTest):
-    FAKE_CREATE_SNAPSHOT = {
-        "snapshot": {
-            "display_name": "snap-001",
-            "display_description": "Daily backup",
-            "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-            "force": True
-        }
-    }
-
-    FAKE_UPDATE_SNAPSHOT_REQUEST = {
-        "metadata": {
-            "key": "v1"
-        }
-    }
-
-    FAKE_INFO_SNAPSHOT = {
-        "snapshot": {
-            "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
-            "display_name": "snap-001",
-            "display_description": "Daily backup",
-            "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-            "status": "available",
-            "size": 30,
-            "created_at": "2012-02-29T03:50:07Z"
-        }
-    }
-
-    FAKE_LIST_SNAPSHOTS = {
-        "snapshots": [
-            {
-                "id": "3fbbcccf-d058-4502-8844-6feeffdf4cb5",
-                "display_name": "snap-001",
-                "display_description": "Daily backup",
-                "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-                "status": "available",
-                "size": 30,
-                "created_at": "2012-02-29T03:50:07Z",
-                "metadata": {
-                    "contents": "junk"
-                }
-            },
-            {
-                "id": "e479997c-650b-40a4-9dfe-77655818b0d2",
-                "display_name": "snap-002",
-                "display_description": "Weekly backup",
-                "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358",
-                "status": "available",
-                "size": 25,
-                "created_at": "2012-03-19T01:52:47Z",
-                "metadata": {}
-            }
-        ]
-    }
-
-    def setUp(self):
-        super(TestSnapshotsClient, self).setUp()
-        fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = snapshots_client.SnapshotsClient(fake_auth,
-                                                       'volume',
-                                                       'regionOne')
-
-    def _test_create_snapshot(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.create_snapshot,
-            'tempest.lib.common.rest_client.RestClient.post',
-            self.FAKE_CREATE_SNAPSHOT,
-            bytes_body)
-
-    def _test_show_snapshot(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_snapshot,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_INFO_SNAPSHOT,
-            bytes_body,
-            snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
-
-    def _test_list_snapshots(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.list_snapshots,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_LIST_SNAPSHOTS,
-            bytes_body,
-            detail=True)
-
-    def _test_create_snapshot_metadata(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.create_snapshot_metadata,
-            'tempest.lib.common.rest_client.RestClient.post',
-            self.FAKE_INFO_SNAPSHOT,
-            bytes_body,
-            snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5",
-            metadata={"key": "v1"})
-
-    def _test_update_snapshot(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.update_snapshot,
-            'tempest.lib.common.rest_client.RestClient.put',
-            self.FAKE_UPDATE_SNAPSHOT_REQUEST,
-            bytes_body,
-            snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
-
-    def _test_show_snapshot_metadata(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.show_snapshot_metadata,
-            'tempest.lib.common.rest_client.RestClient.get',
-            self.FAKE_UPDATE_SNAPSHOT_REQUEST,
-            bytes_body,
-            snapshot_id="3fbbcccf-d058-4502-8844-6feeffdf4cb5")
-
-    def _test_update_snapshot_metadata(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.update_snapshot_metadata,
-            'tempest.lib.common.rest_client.RestClient.put',
-            self.FAKE_UPDATE_SNAPSHOT_REQUEST,
-            bytes_body, snapshot_id="cbc36478b0bd8e67e89")
-
-    def _test_update_snapshot_metadata_item(self, bytes_body=False):
-        self.check_service_client_function(
-            self.client.update_snapshot_metadata_item,
-            'tempest.lib.common.rest_client.RestClient.put',
-            self.FAKE_INFO_SNAPSHOT,
-            bytes_body, volume_type_id="cbc36478b0bd8e67e89")
-
-    def test_create_snapshot_with_str_body(self):
-        self._test_create_snapshot()
-
-    def test_create_snapshot_with_bytes_body(self):
-        self._test_create_snapshot(bytes_body=True)
-
-    def test_show_snapshot_with_str_body(self):
-        self._test_show_snapshot()
-
-    def test_show_snapshot_with_bytes_body(self):
-        self._test_show_snapshot(bytes_body=True)
-
-    def test_list_snapshots_with_str_body(self):
-        self._test_list_snapshots()
-
-    def test_list_snapshots_with_bytes_body(self):
-        self._test_list_snapshots(bytes_body=True)
-
-    def test_create_snapshot_metadata_with_str_body(self):
-        self._test_create_snapshot_metadata()
-
-    def test_create_snapshot_metadata_with_bytes_body(self):
-        self._test_create_snapshot_metadata(bytes_body=True)
-
-    def test_update_snapshot_with_str_body(self):
-        self._test_update_snapshot()
-
-    def test_update_snapshot_with_bytes_body(self):
-        self._test_update_snapshot(bytes_body=True)
-
-    def test_show_snapshot_metadata_with_str_body(self):
-        self._test_show_snapshot_metadata()
-
-    def test_show_snapshot_metadata_with_bytes_body(self):
-        self._test_show_snapshot_metadata(bytes_body=True)
-
-    def test_update_snapshot_metadata_with_str_body(self):
-        self._test_update_snapshot_metadata()
-
-    def test_update_snapshot_metadata_with_bytes_body(self):
-        self._test_update_snapshot_metadata(bytes_body=True)
-
-    def test_force_delete_snapshot(self):
-        self.check_service_client_function(
-            self.client.force_delete_snapshot,
-            'tempest.lib.common.rest_client.RestClient.post',
-            {},
-            snapshot_id="521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-            status=202)
-
-    def test_delete_snapshot(self):
-        self.check_service_client_function(
-            self.client.delete_snapshot,
-            'tempest.lib.common.rest_client.RestClient.delete',
-            {},
-            snapshot_id="521752a6-acf6-4b2d-bc7a-119f9148cd8c",
-            status=202)
diff --git a/tempest/tests/lib/services/volume/v3/test_transfers_client.py b/tempest/tests/lib/services/volume/v3/test_transfers_client.py
index 1dfe2df..3626184 100644
--- a/tempest/tests/lib/services/volume/v3/test_transfers_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_transfers_client.py
@@ -52,6 +52,7 @@
         self.client = transfers_client.TransfersClient(fake_auth,
                                                        'volume',
                                                        'regionOne')
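+        # Path used to build the expected mock_args; TestTransfersV355Client
+        # below overrides it with the 'volume-transfers' endpoint.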
+        self.resource_path = 'os-volume-transfer'
 
     def _test_create_volume_transfer(self, bytes_body=False):
         resp_body = copy.deepcopy(self.FAKE_VOLUME_TRANSFER_INFO)
@@ -72,7 +73,7 @@
                 resp_body,
                 to_utf=bytes_body,
                 status=202,
-                mock_args=['os-volume-transfer', payload],
+                mock_args=[self.resource_path, payload],
                 **kwargs)
 
     def _test_accept_volume_transfer(self, bytes_body=False):
@@ -93,8 +94,9 @@
                 resp_body,
                 to_utf=bytes_body,
                 status=202,
-                mock_args=['os-volume-transfer/%s/accept' %
-                           self.FAKE_VOLUME_TRANSFER_ID, payload],
+                mock_args=['%s/%s/accept' % (self.resource_path,
+                                             self.FAKE_VOLUME_TRANSFER_ID),
+                           payload],
                 transfer_id=self.FAKE_VOLUME_TRANSFER_ID,
                 **kwargs)
 
@@ -156,3 +158,14 @@
             {},
             status=202,
             transfer_id="0e89cdd1-6249-421b-96d8-25fac0623d42")
+
+
+class TestTransfersV355Client(TestTransfersClient):
+
+    def setUp(self):
+        super(TestTransfersV355Client, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = transfers_client.TransfersV355Client(fake_auth,
+                                                           'volume',
+                                                           'regionOne')
+        self.resource_path = 'volume-transfers'
diff --git a/tempest/tests/lib/services/volume/v3/test_types_client.py b/tempest/tests/lib/services/volume/v3/test_types_client.py
index 336aa32..19d6591 100644
--- a/tempest/tests/lib/services/volume/v3/test_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_types_client.py
@@ -121,6 +121,13 @@
             to_utf=bytes_body,
             volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff")
 
+    def _test_show_default_volume_type(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_default_volume_type,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_DEFAULT_VOLUME_TYPE_INFO,
+            to_utf=bytes_body)
+
     def _test_create_volume_type(self, bytes_body=False):
         self.check_service_client_function(
             self.client.create_volume_type,
@@ -224,6 +231,12 @@
     def test_show_volume_type_with_bytes_body(self):
         self._test_show_volume_type(bytes_body=True)
 
+    def test_show_default_volume_type_with_str_body(self):
+        self._test_show_default_volume_type()
+
+    def test_show_default_volume_type_with_bytes_body(self):
+        self._test_show_default_volume_type(bytes_body=True)
+
     def test_create_volume_type_str_body(self):
         self._test_create_volume_type()
 
diff --git a/tempest/tests/lib/services/volume/v3/test_versions_client.py b/tempest/tests/lib/services/volume/v3/test_versions_client.py
index 575cae3..862fb9b 100644
--- a/tempest/tests/lib/services/volume/v3/test_versions_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_versions_client.py
@@ -22,21 +22,6 @@
     FAKE_VERSIONS_INFO = {
         "versions": [
             {
-                "status": "DEPRECATED", "updated": "2016-05-02T20:25:19Z",
-                "links": [
-                    {"href": "http://docs.openstack.org/", "type": "text/html",
-                     "rel": "describedby"},
-                    {"href": "https://10.30.197.39:8776/v1/", "rel": "self"}
-                ],
-                "min_version": "",
-                "version": "",
-                "media-types": [
-                    {"base": "application/json",
-                     "type": "application/vnd.openstack.volume+json;version=1"}
-                ],
-                "id": "v1.0"
-            },
-            {
                 "status": "DEPRECATED", "updated": "2017-02-25T12:00:00Z",
                 "links": [
                     {"href": "http://docs.openstack.org/", "type": "text/html",
@@ -134,8 +119,6 @@
         self._test_show_version('v3', bytes_body=True)
 
     def test_get_base_version_url_app_name(self):
-        self._test_get_base_version_url('https://bar.org/volume/v1/123',
-                                        'https://bar.org/volume/')
         self._test_get_base_version_url('https://bar.org/volume/v2/123',
                                         'https://bar.org/volume/')
         self._test_get_base_version_url('https://bar.org/volume/v3/123',
diff --git a/tempest/tests/lib/services/volume/v3/test_volume_manage_client.py b/tempest/tests/lib/services/volume/v3/test_volume_manage_client.py
index d4313a2..3d47caf 100644
--- a/tempest/tests/lib/services/volume/v3/test_volume_manage_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_volume_manage_client.py
@@ -54,7 +54,6 @@
                 }
             ],
             "availability_zone": "nova",
-            "os-vol-host-attr:host": "controller1@rbd#rbd",
             "encrypted": False,
             "updated_at": None,
             "replication_status": None,
@@ -62,15 +61,12 @@
             "id": "c07cd4a4-b52b-4511-a176-fbaa2011a227",
             "size": 0,
             "user_id": "142d8663efce464c89811c63e45bd82e",
-            "os-vol-tenant-attr:tenant_id": "f21a9c86d7114bf99c711f4874d80474",
-            "os-vol-mig-status-attr:migstat": None,
             "metadata": {},
             "status": "creating",
             "description": "volume-manage-description",
             "multiattach": False,
             "source_volid": None,
             "consistencygroup_id": None,
-            "os-vol-mig-status-attr:name_id": None,
             "name": "volume-managed",
             "bootable": "false",
             "created_at": "2017-07-11T09:14:01.000000",
diff --git a/tempest/tests/lib/test_auth.py b/tempest/tests/lib/test_auth.py
index c3a792f..3edb122 100644
--- a/tempest/tests/lib/test_auth.py
+++ b/tempest/tests/lib/test_auth.py
@@ -786,6 +786,19 @@
                 self.assertIn(attr, auth_params.keys())
                 self.assertEqual(getattr(all_creds, attr), auth_params[attr])
 
+    def test_auth_parameters_with_system_scope(self):
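+        # With system scope, project_* and domain_* attributes must be left
+        # out of the auth parameters and no explicit 'scope' entry is passed.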
+        all_creds = fake_credentials.FakeKeystoneV3AllCredentials()
+        self.auth_provider.credentials = all_creds
+        self.auth_provider.scope = 'system'
+        auth_params = self.auth_provider._auth_params()
+        self.assertNotIn('scope', auth_params.keys())
+        for attr in all_creds.get_init_attributes():
+            if attr.startswith('project_') or attr.startswith('domain_'):
+                self.assertNotIn(attr, auth_params.keys())
+            else:
+                self.assertIn(attr, auth_params.keys())
+                self.assertEqual(getattr(all_creds, attr), auth_params[attr])
+
 
 class TestKeystoneV3Credentials(base.TestCase):
     def testSetAttrUserDomain(self):
diff --git a/tempest/tests/lib/test_decorators.py b/tempest/tests/lib/test_decorators.py
index e3c17e8..fc93f76 100644
--- a/tempest/tests/lib/test_decorators.py
+++ b/tempest/tests/lib/test_decorators.py
@@ -16,7 +16,6 @@
 import abc
 from unittest import mock
 
-import six
 import testtools
 
 from tempest.lib import base as test
@@ -69,8 +68,7 @@
                                condition=True)
 
 
-@six.add_metaclass(abc.ABCMeta)
-class BaseSkipDecoratorTests(object):
+class BaseSkipDecoratorTests(metaclass=abc.ABCMeta):
 
     @abc.abstractmethod
     def _test_skip_helper(self, raise_exception=True, expected_to_skip=True,
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index 85048fb..886d99c 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -12,11 +12,10 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from io import StringIO
 import socket
 from unittest import mock
 
-import six
-from six import StringIO
 import testtools
 
 from tempest.lib.common import ssh
@@ -30,7 +29,7 @@
     SELECT_POLLIN = 1
 
     @mock.patch('paramiko.RSAKey.from_private_key')
-    @mock.patch('six.StringIO')
+    @mock.patch('io.StringIO')
     def test_pkey_calls_paramiko_RSAKey(self, cs_mock, rsa_mock):
         cs_mock.return_value = mock.sentinel.csio
         pkey = 'mykey'
@@ -240,7 +239,7 @@
 
         return chan_mock, poll_mock, select_mock, client_mock
 
-    _utf8_string = six.unichr(1071)
+    _utf8_string = chr(1071)
     _utf8_bytes = _utf8_string.encode("utf-8")
 
     @mock.patch('select.POLLIN', SELECT_POLLIN, create=True)
@@ -274,7 +273,7 @@
         client = ssh.Client('localhost', 'root', timeout=2)
         exc = self.assertRaises(exceptions.SSHExecCommandFailed,
                                 client.exec_command, "test")
-        self.assertIn('R' + self._utf8_string, six.text_type(exc))
+        self.assertIn('R' + self._utf8_string, str(exc))
 
     def test_exec_command_no_select(self):
         gsc_mock = self.patch('tempest.lib.common.ssh.Client.'
diff --git a/tempest/tests/test_base_test.py b/tempest/tests/test_base_test.py
index b154cd5..88c28bf 100644
--- a/tempest/tests/test_base_test.py
+++ b/tempest/tests/test_base_test.py
@@ -109,7 +109,7 @@
 
         test.BaseTestCase.get_tenant_network(credentials_type=creds)
 
-        mock_gcm.assert_called_once_with(roles=['role1'])
+        mock_gcm.assert_called_once_with(roles=['role1'], scope='project')
         mock_gprov.assert_called_once_with()
         mock_gtn.assert_called_once_with(mock_prov, net_client,
                                          self.fixed_network_name)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 6018441..1889420 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -19,7 +19,6 @@
 from tempest.common import utils
 from tempest import config
 from tempest import exceptions
-from tempest.lib.common.utils import data_utils
 from tempest import test
 from tempest.tests import base
 from tempest.tests import fake_config
@@ -33,47 +32,6 @@
                          fake_config.FakePrivate)
 
 
-# NOTE: The test module is for tempest.test.idempotent_id.
-# After all projects switch to use decorators.idempotent_id,
-# we can remove tempest.test.idempotent_id as well as this
-# test module
-class TestIdempotentIdDecorator(BaseDecoratorsTest):
-
-    def _test_helper(self, _id, **decorator_args):
-        @test.idempotent_id(_id)
-        def foo():
-            """Docstring"""
-            pass
-
-        return foo
-
-    def _test_helper_without_doc(self, _id, **decorator_args):
-        @test.idempotent_id(_id)
-        def foo():
-            pass
-
-        return foo
-
-    def test_positive(self):
-        _id = data_utils.rand_uuid()
-        foo = self._test_helper(_id)
-        self.assertIn('id-%s' % _id, getattr(foo, '__testtools_attrs'))
-        self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
-
-    def test_positive_without_doc(self):
-        _id = data_utils.rand_uuid()
-        foo = self._test_helper_without_doc(_id)
-        self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
-
-    def test_idempotent_id_not_str(self):
-        _id = 42
-        self.assertRaises(TypeError, self._test_helper, _id)
-
-    def test_idempotent_id_not_valid_uuid(self):
-        _id = '42'
-        self.assertRaises(ValueError, self._test_helper, _id)
-
-
 class TestServicesDecorator(BaseDecoratorsTest):
     def _test_services_helper(self, *decorator_args):
         class TestFoo(test.BaseTestCase):
diff --git a/tempest/tests/test_list_tests.py b/tempest/tests/test_list_tests.py
index 1cc9c9a..fe44ef6 100644
--- a/tempest/tests/test_list_tests.py
+++ b/tempest/tests/test_list_tests.py
@@ -16,8 +16,6 @@
 import re
 import subprocess
 
-import six
-
 from tempest.tests import base
 
 
@@ -32,7 +30,7 @@
         self.assertEqual(0, p.returncode,
                          "test discovery failed, one or more files cause an "
                          "error on import %s" % ids)
-        ids = six.text_type(ids).split('\n')
+        ids = str(ids).split('\n')
         for test_id in ids:
             if re.match(r'(\w+\.){3}\w+', test_id):
                 if not test_id.startswith('tempest.'):
diff --git a/tempest/tests/test_microversions.py b/tempest/tests/test_microversions.py
index ee6db71..835f51c 100644
--- a/tempest/tests/test_microversions.py
+++ b/tempest/tests/test_microversions.py
@@ -13,7 +13,6 @@
 #    under the License.
 
 from oslo_config import cfg
-import six
 import testtools
 
 from tempest.api.compute import base as compute_base
@@ -75,7 +74,7 @@
                 self.assertRaises(testtools.TestCase.skipException,
                                   test_class.skip_checks)
         except testtools.TestCase.skipException as e:
-            raise testtools.TestCase.failureException(six.text_type(e))
+            raise testtools.TestCase.failureException(str(e))
 
     def test_config_version_none_none(self):
         expected_pass_tests = [VersionTestNoneTolatest, VersionTestNoneTo2_2]
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
index 72e8b6d..9aeedb3 100644
--- a/tempest/tests/test_test.py
+++ b/tempest/tests/test_test.py
@@ -453,6 +453,130 @@
             expected_creds[1][1:],
             mock_get_client_manager.mock_calls[1][2]['roles'])
 
+    def test_setup_credentials_with_role_and_system_scope(self):
+        expected_creds = [['system_my_role', 'role1', 'role2']]
+
+        class SystemRoleCredentials(self.parent_test):
+            credentials = expected_creds
+
+        expected_clients = 'clients'
+        with mock.patch.object(
+                SystemRoleCredentials,
+                'get_client_manager') as mock_get_client_manager:
+            mock_get_client_manager.return_value = expected_clients
+            sys_creds = SystemRoleCredentials()
+            sys_creds.setup_credentials()
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role)
+        self.assertEqual(1, mock_get_client_manager.call_count)
+        self.assertEqual(
+            expected_creds[0][1:],
+            mock_get_client_manager.mock_calls[0][2]['roles'])
+        self.assertEqual(
+            'system',
+            mock_get_client_manager.mock_calls[0][2]['scope'])
+
+    def test_setup_credentials_with_multiple_role_and_system_scope(self):
+        expected_creds = [['system_my_role', 'role1', 'role2'],
+                          ['system_my_role2', 'role1', 'role2'],
+                          ['system_my_role3', 'role3']]
+
+        class SystemRoleCredentials(self.parent_test):
+            credentials = expected_creds
+
+        expected_clients = 'clients'
+        with mock.patch.object(
+                SystemRoleCredentials,
+                'get_client_manager') as mock_get_client_manager:
+            mock_get_client_manager.return_value = expected_clients
+            sys_creds = SystemRoleCredentials()
+            sys_creds.setup_credentials()
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role2'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role2)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role2'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role2)
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role3'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role3)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role3'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role3)
+        self.assertEqual(3, mock_get_client_manager.call_count)
+        self.assertEqual(
+            expected_creds[0][1:],
+            mock_get_client_manager.mock_calls[0][2]['roles'])
+        self.assertEqual(
+            'system', mock_get_client_manager.mock_calls[0][2]['scope'])
+        self.assertEqual(
+            expected_creds[1][1:],
+            mock_get_client_manager.mock_calls[1][2]['roles'])
+        self.assertEqual(
+            'system', mock_get_client_manager.mock_calls[1][2]['scope'])
+        self.assertEqual(
+            expected_creds[2][1:],
+            mock_get_client_manager.mock_calls[2][2]['roles'])
+        self.assertEqual(
+            'system', mock_get_client_manager.mock_calls[2][2]['scope'])
+
+    def test_setup_credentials_with_role_and_multiple_scope(self):
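+        # Credential names prefixed with 'project_', 'domain_' or 'system_'
+        # request the matching token scope; unprefixed names default to
+        # project scope.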
+        expected_creds = [['my_role', 'role1', 'role2'],
+                          ['project_my_role', 'role1', 'role2'],
+                          ['domain_my_role', 'role1', 'role2'],
+                          ['system_my_role', 'role1', 'role2']]
+
+        class SystemRoleCredentials(self.parent_test):
+            credentials = expected_creds
+
+        expected_clients = 'clients'
+        with mock.patch.object(
+                SystemRoleCredentials,
+                'get_client_manager') as mock_get_client_manager:
+            mock_get_client_manager.return_value = expected_clients
+            sys_creds = SystemRoleCredentials()
+            sys_creds.setup_credentials()
+        self.assertTrue(hasattr(sys_creds, 'os_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_project_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_project_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_project_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_project_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_domain_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_domain_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_domain_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_domain_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_system_my_role)
+        self.assertTrue(hasattr(sys_creds, 'os_roles_system_my_role'))
+        self.assertEqual(expected_clients, sys_creds.os_roles_system_my_role)
+
+        self.assertEqual(4, mock_get_client_manager.call_count)
+        self.assertEqual(
+            expected_creds[0][1:],
+            mock_get_client_manager.mock_calls[0][2]['roles'])
+        self.assertEqual(
+            'project', mock_get_client_manager.mock_calls[0][2]['scope'])
+        self.assertEqual(
+            expected_creds[1][1:],
+            mock_get_client_manager.mock_calls[1][2]['roles'])
+        self.assertEqual(
+            'project', mock_get_client_manager.mock_calls[1][2]['scope'])
+        self.assertEqual(
+            expected_creds[2][1:],
+            mock_get_client_manager.mock_calls[2][2]['roles'])
+        self.assertEqual(
+            'domain', mock_get_client_manager.mock_calls[2][2]['scope'])
+        self.assertEqual(
+            expected_creds[3][1:],
+            mock_get_client_manager.mock_calls[3][2]['roles'])
+        self.assertEqual(
+            'system', mock_get_client_manager.mock_calls[3][2]['scope'])
+
     def test_setup_class_overwritten(self):
 
         class OverridesSetup(self.parent_test):
diff --git a/tools/check_logs.py b/tools/check_logs.py
index de7e41d..8ab3af2 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -17,12 +17,12 @@
 
 import argparse
 import gzip
+import io
 import os
 import re
 import sys
+import urllib.request as urlreq
 
-import six
-import six.moves.urllib.request as urlreq
 import yaml
 
 # DEVSTACK_GATE_GRENADE is either unset if grenade is not running
@@ -56,39 +56,39 @@
     's-proxy'])
 
 
-def process_files(file_specs, url_specs, whitelists):
+def process_files(file_specs, url_specs, allow_lists):
     regexp = re.compile(r"^.* (ERROR|CRITICAL|TRACE) .*\[.*\-.*\]")
     logs_with_errors = []
     for (name, filename) in file_specs:
-        whitelist = whitelists.get(name, [])
+        allow_list = allow_lists.get(name, [])
         with open(filename) as content:
-            if scan_content(content, regexp, whitelist):
+            if scan_content(content, regexp, allow_list):
                 logs_with_errors.append(name)
     for (name, url) in url_specs:
-        whitelist = whitelists.get(name, [])
+        allow_list = allow_lists.get(name, [])
         req = urlreq.Request(url)
         req.add_header('Accept-Encoding', 'gzip')
         page = urlreq.urlopen(req)
-        buf = six.StringIO(page.read())
+        buf = io.BytesIO(page.read())
         f = gzip.GzipFile(fileobj=buf)
-        if scan_content(f.read().splitlines(), regexp, whitelist):
+        if scan_content(f.read().decode().splitlines(), regexp, allow_list):
             logs_with_errors.append(name)
     return logs_with_errors
 
 
-def scan_content(content, regexp, whitelist):
+def scan_content(content, regexp, allow_list):
     had_errors = False
     for line in content:
         if not line.startswith("Stderr:") and regexp.match(line):
-            whitelisted = False
-            for w in whitelist:
+            allowed = False
+            for w in allow_list:
                 pat = ".*%s.*%s.*" % (w['module'].replace('.', '\\.'),
                                       w['message'])
                 if re.match(pat, line):
-                    whitelisted = True
+                    allowed = True
                     break
-            if not whitelisted or dump_all_errors:
-                if not whitelisted:
+            if not allowed or dump_all_errors:
+                if not allowed:
                     had_errors = True
     return had_errors
 
@@ -105,9 +105,9 @@
         print("Must provide exactly one of -d or -u")
         return 1
     print("Checking logs...")
-    WHITELIST_FILE = os.path.join(
+    ALLOW_LIST_FILE = os.path.join(
         os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
-        "etc", "whitelist.yaml")
+        "etc", "allow-list.yaml")
 
     file_matcher = re.compile(r".*screen-([\w-]+)\.log")
     files = []
@@ -132,17 +132,17 @@
         if m:
             urls_to_process.append((m.group(1), u))
 
-    whitelists = {}
-    with open(WHITELIST_FILE) as stream:
+    allow_lists = {}
+    with open(ALLOW_LIST_FILE) as stream:
         loaded = yaml.safe_load(stream)
         if loaded:
-            for (name, l) in six.iteritems(loaded):
+            for (name, l) in loaded.items():
                 for w in l:
                     assert 'module' in w, 'no module in %s' % name
                     assert 'message' in w, 'no message in %s' % name
-            whitelists = loaded
+            allow_lists = loaded
     logs_with_errors = process_files(files_to_process, urls_to_process,
-                                     whitelists)
+                                     allow_lists)
 
     failed = False
     if logs_with_errors:
@@ -164,14 +164,14 @@
 
 
 usage = """
-Find non-white-listed log errors in log files from a devstack-gate run.
+Find non-allow-listed log errors in log files from a devstack-gate run.
 Log files will be searched for ERROR or CRITICAL messages. If any
-error messages do not match any of the whitelist entries contained in
-etc/whitelist.yaml, those messages will be printed to the console and
+error messages do not match any of the allow-list entries contained in
+etc/allow-list.yaml, those messages will be printed to the console and
 failure will be returned. A file directory containing logs or a url to the
 log files of an OpenStack gate job can be provided.
 
-The whitelist yaml looks like:
+The allow-list yaml looks like:
 
 log-name:
     - module: "a.b.c"
@@ -179,7 +179,7 @@
     - module: "a.b.c"
       message: "regexp"
 
-repeated for each log file with a whitelist.
+repeated for each log file with an allow-list.
 """
 
 parser = argparse.ArgumentParser(description=usage)
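
For reference (not part of the patch): a minimal sketch of how scan_content matches a log line against an allow-list entry of the shape documented in the usage string above. The entry and the log line below are made-up examples.

    import re

    # Hypothetical allow-list entry, shaped like the YAML documented above:
    # the module's dots are escaped literally and the message is a regexp.
    allow_list = [{'module': 'oslo.messaging', 'message': 'Timed out waiting'}]

    # Hypothetical log line in the format scanned by check_logs.py.
    line = "2021-01-01 12:00:00 ERROR oslo.messaging [req-1 -] Timed out waiting for reply"

    error_re = re.compile(r"^.* (ERROR|CRITICAL|TRACE) .*\[.*\-.*\]")

    allowed = False
    if not line.startswith("Stderr:") and error_re.match(line):
        for w in allow_list:
            pat = ".*%s.*%s.*" % (w['module'].replace('.', '\\.'), w['message'])
            if re.match(pat, line):
                allowed = True
                break

    print(allowed)  # True: the error matches an allow-list entry, so it is not flagged
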
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 618c388..1b5b369 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -32,9 +32,9 @@
 
 # List of projects having tempest plugin stale or unmaintained for a long time
 # (6 months or more)
-# TODO(masayukig): Some of these can be removed from BLACKLIST in the future
-# when the patches are merged.
-BLACKLIST = [
+# TODO(masayukig): Some of these can be removed from NON_ACTIVE_LIST in the
+# future when the patches are merged.
+NON_ACTIVE_LIST = [
     'x/gce-api',  # It looks gce-api doesn't support python3 yet.
     'x/glare',  # To avoid sanity-job failure
     'x/group-based-policy',  # It looks this doesn't support python3 yet.
@@ -52,8 +52,11 @@
     'x/tap-as-a-service',  # To avoid sanity-job failure
     'x/valet',  # https://review.opendev.org/#/c/638339/
     'x/kingbird',  # https://bugs.launchpad.net/kingbird/+bug/1869722
-    # vmware-nsx is blacklisted since https://review.opendev.org/#/c/736952
+    # vmware-nsx is excluded since https://review.opendev.org/#/c/736952
     'x/vmware-nsx-tempest-plugin',
+    # mogan is unmaintained now, remove from the list when this is merged:
+    # https://review.opendev.org/c/x/mogan/+/767718
+    'x/mogan',
 ]
 
 url = 'https://review.opendev.org/projects/'
@@ -86,10 +89,10 @@
         False
 
 
-if len(sys.argv) > 1 and sys.argv[1] == 'blacklist':
-    for black_plugin in BLACKLIST:
-        print(black_plugin)
-    # We just need BLACKLIST when we use this `blacklist` option.
+if len(sys.argv) > 1 and sys.argv[1] == 'nonactivelist':
+    for non_active_plugin in NON_ACTIVE_LIST:
+        print(non_active_plugin)
+    # We just need NON_ACTIVE_LIST when we use this `nonactivelist` option.
     # So, this exits here.
     sys.exit()
 
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index 33675ed..4430bbf 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -81,17 +81,17 @@
 
 printf "\n\n"
 
-# Print BLACKLIST
-if [[ -r doc/source/data/tempest-blacklisted-plugins-registry.header ]]; then
-    cat doc/source/data/tempest-blacklisted-plugins-registry.header
+# Print NON_ACTIVE_LIST
+if [[ -r doc/source/data/tempest-non-active-plugins-registry.header ]]; then
+    cat doc/source/data/tempest-non-active-plugins-registry.header
 fi
 
-blacklist=$(python tools/generate-tempest-plugins-list.py blacklist)
-name_col_len=$(echo "${blacklist}" | wc -L)
+nonactivelist=$(python tools/generate-tempest-plugins-list.py nonactivelist)
+name_col_len=$(echo "${nonactivelist}" | wc -L)
 name_col_len=$(( name_col_len + 20 ))
 
 printf "\n\n"
-print_plugin_table "${blacklist}"
+print_plugin_table "${nonactivelist}"
 
 printf "\n\n"
 
diff --git a/tools/tempest-integrated-gate-compute-blacklist.txt b/tools/tempest-integrated-gate-compute-exclude-list.txt
similarity index 60%
rename from tools/tempest-integrated-gate-compute-blacklist.txt
rename to tools/tempest-integrated-gate-compute-exclude-list.txt
index 2290751..8805262 100644
--- a/tools/tempest-integrated-gate-compute-blacklist.txt
+++ b/tools/tempest-integrated-gate-compute-exclude-list.txt
@@ -11,9 +11,3 @@
 tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_basic_ops
 tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
 tempest.scenario.test_volume_backup_restore.TestVolumeBackupRestore.test_volume_backup_restore
-
-# Skip test scenario when creating second image from instance
-# https://bugs.launchpad.net/tripleo/+bug/1881592
-# The test is most likely wrong and may fail if the fists image is create quickly.
-# FIXME: Either fix the test so it won't race or consider if we should cover the scenario at all.
-tempest.api.compute.images.test_images_oneserver_negative.ImagesOneServerNegativeTestJSON.test_create_second_image_when_first_image_is_being_saved
diff --git a/tools/tempest-integrated-gate-networking-blacklist.txt b/tools/tempest-integrated-gate-networking-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-networking-blacklist.txt
rename to tools/tempest-integrated-gate-networking-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-object-storage-blacklist.txt b/tools/tempest-integrated-gate-object-storage-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-object-storage-blacklist.txt
rename to tools/tempest-integrated-gate-object-storage-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-placement-blacklist.txt b/tools/tempest-integrated-gate-placement-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-placement-blacklist.txt
rename to tools/tempest-integrated-gate-placement-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-blacklist.txt
new file mode 120000
index 0000000..2d691f8
--- /dev/null
+++ b/tools/tempest-integrated-gate-storage-blacklist.txt
@@ -0,0 +1 @@
+tempest-integrated-gate-storage-exclude-list.txt
\ No newline at end of file
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-storage-blacklist.txt
rename to tools/tempest-integrated-gate-storage-exclude-list.txt
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index c983da9..106a9c6 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -44,7 +44,7 @@
 # retrieve a list of projects having tempest plugins
 PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
 
-BLACKLIST="$(python tools/generate-tempest-plugins-list.py blacklist)"
+NON_ACTIVE_LIST="$(python tools/generate-tempest-plugins-list.py nonactivelist)"
 
 # Function to clone project using zuul-cloner or from git
 function clone_project {
@@ -117,8 +117,8 @@
 failed_plugin=''
 # Perform sanity on all tempest plugin projects
 for project in $PROJECT_LIST; do
-    # Remove blacklisted tempest plugins
-    if ! [[ `echo $BLACKLIST | grep -c $project ` -gt 0 ]]; then
+    # Remove non-active tempest plugins
+    if ! [[ `echo $NON_ACTIVE_LIST | grep -c $project ` -gt 0 ]]; then
         plugin_sanity_check $project && passed_plugin+=", $project" || \
         failed_plugin+="$project, " > $SANITY_DIR/$project.txt
     fi
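
As a side note (not part of the patch), the non-active filtering above is roughly equivalent to the following sketch; it uses exact membership instead of the script's substring grep, and the project values are just examples.

    # Rough equivalent of the shell loop above: skip projects that appear in
    # the non-active list before running the plugin sanity check.
    non_active_list = {'x/glare', 'x/mogan'}
    project_list = ['openstack/neutron-tempest-plugin', 'x/mogan']

    for project in project_list:
        if project in non_active_list:
            continue  # non-active plugin: excluded from the sanity run
        print("would sanity-check %s" % project)
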
diff --git a/tox.ini b/tox.ini
index 0477d6f..cd32174 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 envlist = pep8,py36,py38,bashate,pip-check-reqs
-minversion = 3.1.1
+minversion = 3.18.0
 skipsdist = True
 ignore_basepython_conflict = True
 
@@ -23,10 +23,10 @@
     OS_STDERR_CAPTURE=1
     OS_TEST_TIMEOUT=160
     PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST GABBI_TEMPEST_PATH
+passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
 usedevelop = True
 install_command = pip install {opts} {packages}
-whitelist_externals = *
+allowlist_externals = *
 deps =
     -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
@@ -108,7 +108,7 @@
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
-# FIXME: We can replace it with the `--black-regex` option to exclude tests now.
+# FIXME: We can replace it with the `--exclude-regex` option to exclude tests now.
 commands =
     find . -type f -name "*.pyc" -delete
     tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' {posargs}
@@ -132,11 +132,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-networking-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-networking-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-networking-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-networking-exclude-list.txt {posargs}
 
 [testenv:integrated-compute]
 envdir = .tox/tempest
@@ -145,11 +145,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-compute-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-compute-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
 
 [testenv:integrated-placement]
 envdir = .tox/tempest
@@ -158,11 +158,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-placement-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-placement-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-placement-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-placement-exclude-list.txt {posargs}
 
 [testenv:integrated-storage]
 envdir = .tox/tempest
@@ -171,11 +171,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-storage-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-storage-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-storage-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-storage-exclude-list.txt {posargs}
 
 [testenv:integrated-object-storage]
 envdir = .tox/tempest
@@ -184,11 +184,11 @@
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-object-storage-blacklist.txt {posargs}
-    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-object-storage-blacklist.txt {posargs}
+    tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-object-storage-exclude-list.txt {posargs}
+    tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-object-storage-exclude-list.txt {posargs}
 
 [testenv:full-serial]
 envdir = .tox/tempest
@@ -198,7 +198,7 @@
 deps = {[tempestenv]deps}
 # The regex below is used to select which tests to run and exclude the slow tag:
 # See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
-# FIXME: We can replace it with the `--black-regex` option to exclude tests now.
+# FIXME: We can replace it with the `--exclude-regex` option to exclude tests now.
 commands =
     find . -type f -name "*.pyc" -delete
     tempest run --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario))' {posargs}
@@ -279,18 +279,33 @@
 [testenv:docs]
 deps =
   -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/requirements.txt
   -r{toxinidir}/doc/requirements.txt
 commands =
+  sphinx-apidoc -f -o doc/source/tests/compute tempest/api/compute
+  sphinx-apidoc -f -o doc/source/tests/identity tempest/api/identity
+  sphinx-apidoc -f -o doc/source/tests/image tempest/api/image
+  sphinx-apidoc -f -o doc/source/tests/network tempest/api/network
+  sphinx-apidoc -f -o doc/source/tests/object_storage tempest/api/object_storage
+  sphinx-apidoc -f -o doc/source/tests/scenario tempest/scenario
+  sphinx-apidoc -f -o doc/source/tests/volume tempest/api/volume
   rm -rf doc/build
   sphinx-build -W -b html doc/source doc/build/html
-whitelist_externals = rm
+allowlist_externals =
+    rm
 
 [testenv:pdf-docs]
 deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
+   rm
    make
 commands =
+   sphinx-apidoc -f -o doc/source/tests/compute tempest/api/compute
+   sphinx-apidoc -f -o doc/source/tests/identity tempest/api/identity
+   sphinx-apidoc -f -o doc/source/tests/image tempest/api/image
+   sphinx-apidoc -f -o doc/source/tests/network tempest/api/network
+   sphinx-apidoc -f -o doc/source/tests/object_storage tempest/api/object_storage
+   sphinx-apidoc -f -o doc/source/tests/scenario tempest/scenario
+   sphinx-apidoc -f -o doc/source/tests/volume tempest/api/volume
    sphinx-build -W -b latex doc/source doc/build/pdf
    make -C doc/build/pdf
 
@@ -349,13 +364,12 @@
 [testenv:releasenotes]
 deps =
   -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/requirements.txt
   -r{toxinidir}/doc/requirements.txt
 commands =
   rm -rf releasenotes/build
   sphinx-build -a -E -W -d releasenotes/build/doctrees \
          -b html releasenotes/source releasenotes/build/html
-whitelist_externals = rm
+allowlist_externals = rm
 
 [testenv:bashate]
 # if you want to test out some changes you have made to bashate
@@ -363,7 +377,7 @@
 # modified bashate tree
 deps =
    {env:BASHATE_INSTALL_PATH:bashate}
-whitelist_externals = bash
+allowlist_externals = bash
 commands = bash -c "find {toxinidir}/tools    \
          -not \( -type d -name .?\* -prune \) \
          -type f                              \
@@ -392,6 +406,18 @@
 
 [testenv:plugin-sanity-check]
 # perform tempest plugin sanity
-whitelist_externals = bash
+allowlist_externals = bash
 commands =
   bash tools/tempest-plugin-sanity.sh
+
+[testenv:stestr-master]
+envdir = .tox/tempest
+sitepackages = {[tempestenv]sitepackages}
+basepython = {[tempestenv]basepython}
+setenv = {[tempestenv]setenv}
+deps = {[tempestenv]deps}
+# The commands below install the stestr master version and run the smoke tests
+commands =
+    find . -type f -name "*.pyc" -delete
+    pip install -U git+https://github.com/mtreinish/stestr
+    tempest run --regex '\[.*\bsmoke\b.*\]' {posargs}
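
For context (not part of the patch): the renamed exclude-list files passed to `tempest run --exclude-list` above hold one regular expression per line, and any test ID matching one of them is dropped from the run. A rough illustration, where the second test ID is a hypothetical example:

    import re

    # Illustration only (not tempest/stestr internals): tests whose IDs match
    # any exclude-list pattern are removed from the selected set.
    exclude_patterns = [
        r"tempest\.scenario\.test_volume_backup_restore\."
        r"TestVolumeBackupRestore\.test_volume_backup_restore",
    ]
    test_ids = [
        "tempest.scenario.test_volume_backup_restore."
        "TestVolumeBackupRestore.test_volume_backup_restore",
        "tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server",
    ]
    selected = [t for t in test_ids
                if not any(re.search(p, t) for p in exclude_patterns)]
    print(selected)  # only the second (non-excluded) test ID remains
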
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
new file mode 100644
index 0000000..3deb944
--- /dev/null
+++ b/zuul.d/base.yaml
@@ -0,0 +1,86 @@
+- job:
+    name: devstack-tempest
+    parent: devstack
+    description: |
+      Base Tempest job.
+
+      This Tempest job provides the base for both the single and multi-node
+      test setup. To run a multi-node test inherit from devstack-tempest and
+      set the nodeset to a multi-node one.
+    required-projects: &base_required-projects
+      - opendev.org/openstack/tempest
+    timeout: 7200
+    roles: &base_roles
+      - zuul: opendev.org/openstack/devstack
+    vars: &base_vars
+      devstack_services:
+        tempest: true
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            compute:
+              min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
+      test_results_stage_name: test_results
+      zuul_copy_output:
+        '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
+        '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
+        '{{ devstack_base_dir }}/tempest/tempest.log': logs
+        '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
+        '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
+        '{{ stage_dir }}/stackviz': logs
+      extensions_to_txt:
+        conf: true
+        log: true
+        yaml: true
+        yml: true
+    run: playbooks/devstack-tempest.yaml
+    post-run: playbooks/post-tempest.yaml
+
+- job:
+    name: devstack-tempest-ipv6
+    parent: devstack-ipv6
+    description: |
+      Base Tempest IPv6 job. This job is derived from 'devstack-ipv6',
+      which sets the IPv6-only setting for OpenStack services. As part of
+      the run phase, this job will verify the IPv6 setting and check that the
+      service endpoints and listen addresses are IPv6. Basically it will run
+      the script ./tool/verify-ipv6-only-deployments.sh
+
+      Child jobs of this job can run their own set of tests and can
+      add post-run playbooks to extend the IPv6 verification specific
+      to their deployed services.
+      Check the wiki page for more details about project jobs setup
+      - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
+    required-projects: *base_required-projects
+    timeout: 7200
+    roles: *base_roles
+    vars: *base_vars
+    run: playbooks/devstack-tempest-ipv6.yaml
+    post-run: playbooks/post-tempest.yaml
+
+- job:
+    name: tempest-multinode-full-base
+    parent: devstack-tempest
+    description: |
+      Base multinode integration test with Neutron networking and py27.
+      Former names for this job were:
+        * neutron-tempest-multinode-full
+        * legacy-tempest-dsvm-neutron-multinode-full
+        * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
+      This job includes two nodes, controller / tempest plus a subnode, but
+      it can be used with different topologies, as long as a controller node
+      and a tempest one exist.
+    timeout: 10800
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        FORCE_CONFIG_DRIVE: false
+        NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+        LIVE_MIGRATION_AVAILABLE: true
+        USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+    group-vars:
+      peers:
+        devstack_localrc:
+          NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+          LIVE_MIGRATION_AVAILABLE: true
+          USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
new file mode 100644
index 0000000..8bf53a9
--- /dev/null
+++ b/zuul.d/integrated-gate.yaml
@@ -0,0 +1,396 @@
+# NOTE(gmann): This file includes all integrated jobs definition which
+# are supposed to be run by Tempest and other projects as
+# integrated testing.
+- job:
+    name: tempest-all
+    parent: devstack-tempest
+    description: |
+      Integration test that runs all tests.
+      Former name for this job was:
+        * legacy-periodic-tempest-dsvm-all-master
+    vars:
+      tox_envlist: all
+      tempest_test_regex: tempest
+      # TODO(gmann): Enable File injection tests once nova bug is fixed
+      # https://bugs.launchpad.net/nova/+bug/1882421
+      # devstack_localrc:
+      #   ENABLE_FILE_INJECTION: true
+
+- job:
+    name: tempest-ipv6-only
+    parent: devstack-tempest-ipv6
+    # This currently works from stable/pike on.
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Integration test of IPv6-only deployments. This job runs
+      smoke and IPv6-related tests only. The basic idea is to test
+      whether OpenStack services listen on IPv6 addresses or not.
+    timeout: 10800
+    vars:
+      tox_envlist: ipv6-only
+
+- job:
+    name: tempest-full
+    parent: devstack-tempest
+    # This currently works from stable/pike on.
+    # Before stable/pike, legacy version of tempest-full
+    # 'legacy-tempest-dsvm-neutron-full' run.
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Base integration test with Neutron networking and py27.
+      This job is supposed to run on stable/train and older setups only.
+      If you are running it on the stable/ussuri gate onwards for python2.7
+      coverage, then you need to do override-checkout with any stable
+      branch less than or equal to stable/train.
+      Former names for this job were:
+        * legacy-tempest-dsvm-neutron-full
+        * gate-tempest-dsvm-neutron-full-ubuntu-xenial
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        ENABLE_FILE_INJECTION: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        USE_PYTHON3: False
+      devstack_services:
+        # NOTE(mriedem): Disable the cinder-backup service from tempest-full
+        # since tempest-full is in the integrated-gate project template but
+        # the backup tests do not really involve other services so they should
+        # be run in some more cinder-specific job, especially because the
+        # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
+        c-bak: false
+
+- job:
+    name: tempest-full-py3
+    parent: devstack-tempest
+    # This job version is with swift enabled on py3
+    # as swift is ready on py3 from stable/ussuri onwards.
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    description: |
+      Base integration test with Neutron networking, horizon, swift enable,
+      and py3.
+      Former names for this job were:
+        * legacy-tempest-dsvm-py35
+        * gate-tempest-dsvm-py35
+    required-projects:
+      - openstack/horizon
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_local_conf:
+        post-config:
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            ovs:
+              bridge_mappings: public:br-ex
+              resource_provider_bandwidths: br-ex:1000000:1000000
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              qos_placement_physnet: public
+      devstack_services:
+        # Enable horizon so that we can run horizon tests.
+        horizon: true
+        neutron-placement: true
+        neutron-qos: true
+
+- job:
+    name: tempest-integrated-networking
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for networking. This is a subset of
+      the 'tempest-full-py3' job and runs only Neutron and Nova related tests.
+      This is meant to be run on the neutron gate only.
+    vars:
+      tox_envlist: integrated-network
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-compute
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for compute. This is a
+      subset of the 'tempest-full-py3' job and runs Nova, Neutron, Cinder (except backup tests)
+      and Glance related tests. This is meant to be run on the Nova gate only.
+    vars:
+      tox_envlist: integrated-compute
+      tempest_exclude_regex: ""
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-placement
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for placement. This is a
+      subset of the 'tempest-full-py3' job and runs Nova and Neutron
+      related tests. This is meant to be run on the Placement gate only.
+    vars:
+      tox_envlist: integrated-placement
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-storage
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for image & block storage. This is a
+      subset of the 'tempest-full-py3' job and runs Cinder, Glance, Swift and Nova
+      related tests. This is meant to be run on the Cinder and Glance gates only.
+    vars:
+      tox_envlist: integrated-storage
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+
+- job:
+    name: tempest-integrated-object-storage
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for object storage. This is a
+      subset of the 'tempest-full-py3' job and runs Swift, Cinder and Glance
+      related tests. This is meant to be run on the Swift gate only.
+    vars:
+      tox_envlist: integrated-object-storage
+      devstack_localrc:
+        # NOTE(gmann): swift is not ready on python3 yet and devstack
+        # installs it on python2.7 only. But setting USE_PYTHON3 here
+        # for the future, once swift is ready on py3.
+        USE_PYTHON3: true
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-focal
+    # This job runs on Focal from stable/victoria on.
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-multinode-full-py3
+    parent: tempest-multinode-full
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_local_conf:
+        post-config:
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            ovs:
+              bridge_mappings: public:br-ex
+              resource_provider_bandwidths: br-ex:1000000:1000000
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              qos_placement_physnet: public
+      devstack_services:
+        neutron-placement: true
+        neutron-qos: true
+        neutron-trunk: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+        devstack_local_conf:
+          post-config:
+            "/$NEUTRON_CORE_PLUGIN_CONF":
+              ovs:
+                bridge_mappings: public:br-ex
+                resource_provider_bandwidths: br-ex:1000000:1000000
+
+- job:
+    name: tempest-slow
+    parent: tempest-multinode-full
+    description: |
+      This multinode integration job will run all the tests tagged as slow.
+      It enables the lvm multibackend setup to cover a few scenario tests.
+      This job will run only slow tests (API or Scenario) serially.
+
+      Former names for this job were:
+        * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
+        * tempest-scenario-multinode-lvm-multibackend
+    timeout: 10800
+    # This job runs on stable/stein onwards.
+    branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
+    vars:
+      tox_envlist: slow-serial
+      devstack_localrc:
+        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-placement: true
+        neutron-qos: true
+      tempest_concurrency: 2
+    group-vars:
+      # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+      # the controller and subnode prior to Rocky so we have to make sure the
+      # variable is set in both locations.
+      subnode:
+        devstack_localrc:
+          ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+    name: tempest-slow-py3
+    parent: tempest-slow
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        c-bak: false
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+
+- job:
+    name: tempest-cinder-v2-api
+    parent: devstack-tempest
+    # NOTE(gmann): Cinder v2 APIs are available until
+    # stable/wallaby only.
+    override-checkout: stable/wallaby
+    description: |
+      This job runs the cinder API tests against the v2 endpoint.
+    vars:
+      tox_envlist: all
+      tempest_test_regex: api.*volume
+      devstack_localrc:
+        TEMPEST_VOLUME_TYPE: volumev2
+
+- job:
+    name: tempest-pg-full
+    parent: tempest-full-py3
+    description: |
+      Base integration test with Neutron networking and PostgreSQL.
+      Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
+    vars:
+      devstack_localrc:
+        # TODO(gmann): Enable File injection tests once nova bug is fixed
+        # https://bugs.launchpad.net/nova/+bug/1882421
+        # ENABLE_FILE_INJECTION: true
+        DATABASE_TYPE: postgresql
+
+- project-template:
+    name: integrated-gate-networking
+    description: |
+      Run the python3 Tempest network integration tests (Nova and Neutron related)
+      in check and gate for the neutron integrated gate. This is meant to be
+      run on neutron gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-networking
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-networking
+
+- project-template:
+    name: integrated-gate-compute
+    description: |
+      Run the python3 Tempest compute integration tests
+      (Nova, Neutron, Cinder and Glance related) in check and gate
+      for the Nova integrated gate. This is meant to be
+      run on Nova gate only.
+    check:
+      jobs:
+        - tempest-integrated-compute
+    gate:
+      jobs:
+        - tempest-integrated-compute
+
+- project-template:
+    name: integrated-gate-placement
+    description: |
+      Run the python3 Tempest placement integration tests
+      (Nova and Neutron related) in check and gate
+      for the Placement integrated gate. This is meant to be
+      run on Placement gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-placement
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-placement
+
+- project-template:
+    name: integrated-gate-storage
+    description: |
+      Run the python3 Tempest image & block storage integration tests
+      (Cinder, Glance, Swift and Nova related) in check and gate
+      for the Cinder and Glance integrated gate. This is meant to be
+      run on the Cinder and Glance gates only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-storage
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-storage
+
+- project-template:
+    name: integrated-gate-object-storage
+    description: |
+      Run the python3 Tempest object storage integration tests
+      (Swift, Cinder and Glance related) in check and gate
+      for the swift integrated gate. This is meant to be
+      run on swift gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-object-storage
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-object-storage
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
new file mode 100644
index 0000000..698df53
--- /dev/null
+++ b/zuul.d/project.yaml
@@ -0,0 +1,137 @@
+- project:
+    templates:
+      - check-requirements
+      - integrated-gate-py3
+      - openstack-cover-jobs
+      - openstack-python3-xena-jobs
+      - publish-openstack-docs-pti
+      - release-notes-jobs-python3
+    check:
+      jobs:
+        - tempest-full-parallel:
+            # Define list of irrelevant files to use everywhere else
+            irrelevant-files: &tempest-irrelevant-files
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^tools/.*$
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
+        - tempest-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-ipv6:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - glance-multistore-cinder-import:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-wallaby-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-victoria-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-ussuri-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-train-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-multinode-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-tox-plugin-sanity-check:
+            irrelevant-files: &tempest-irrelevant-files-2
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
+              # tools/ is not here since this relies on a script in tools/.
+        - tempest-ipv6-only:
+            irrelevant-files: *tempest-irrelevant-files-2
+        - tempest-slow-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - nova-live-migration:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-grenade-multinode:
+            irrelevant-files: *tempest-irrelevant-files
+        - grenade:
+            irrelevant-files: *tempest-irrelevant-files
+        - puppet-openstack-integration-4-scenario001-tempest-centos-7:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - puppet-openstack-integration-4-scenario002-tempest-centos-7:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - puppet-openstack-integration-4-scenario003-tempest-centos-7:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - puppet-openstack-integration-4-scenario004-tempest-centos-7:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-tempest-dvr:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - interop-tempest-consistency:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-test-account-py3:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-test-account-no-admin-py3:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - openstack-tox-bashate:
+            irrelevant-files: *tempest-irrelevant-files-2
+    gate:
+      jobs:
+        - tempest-slow-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-grenade-multinode:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - grenade:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-ipv6-only:
+            irrelevant-files: *tempest-irrelevant-files-2
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files: *tempest-irrelevant-files
+    experimental:
+      jobs:
+        - tempest-stestr-master
+        - tempest-cinder-v2-api:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-all:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-tempest-dvr-ha-multinode-full:
+            irrelevant-files: *tempest-irrelevant-files
+        - nova-tempest-v2-api:
+            irrelevant-files: *tempest-irrelevant-files
+        - cinder-tempest-lvm-multibackend:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-pg-full:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-opensuse15:
+            irrelevant-files: *tempest-irrelevant-files
+    periodic-stable:
+      jobs:
+        - tempest-full-wallaby-py3
+        - tempest-full-victoria-py3
+        - tempest-full-ussuri-py3
+        - tempest-full-train-py3
+    periodic:
+      jobs:
+        - tempest-all
+        - tempest-full-oslo-master
+        - tempest-stestr-master
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
new file mode 100644
index 0000000..455e283
--- /dev/null
+++ b/zuul.d/stable-jobs.yaml
@@ -0,0 +1,155 @@
+# NOTE(gmann): This file includes all stable release jobs definition.
+- job:
+    name: tempest-full-wallaby-py3
+    parent: tempest-full-py3
+    override-checkout: stable/wallaby
+
+- job:
+    name: tempest-full-victoria-py3
+    parent: tempest-full-py3
+    override-checkout: stable/victoria
+
+- job:
+    name: tempest-full-ussuri-py3
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-bionic
+    override-checkout: stable/ussuri
+
+- job:
+    name: tempest-full-train-py3
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-bionic
+    override-checkout: stable/train
+
+- job:
+    name: tempest-full-py3
+    parent: devstack-tempest
+    # This job version is with swift disabled on py3
+    # as swift was not ready on py3 until stable/train.
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+      - stable/stein
+      - stable/train
+    description: |
+      Base integration test with Neutron networking, swift disabled, and py3.
+      Former names for this job were:
+        * legacy-tempest-dsvm-py35
+        * gate-tempest-dsvm-py35
+    required-projects:
+      - openstack/horizon
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_local_conf:
+        post-config:
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            ovs:
+              bridge_mappings: public:br-ex
+              resource_provider_bandwidths: br-ex:1000000:1000000
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              qos_placement_physnet: public
+      devstack_services:
+        # Enable horizon so that we can run horizon tests.
+        horizon: true
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        # NOTE(mriedem): Disable the cinder-backup service from
+        # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+        # project template but the backup tests do not really involve other
+        # services so they should be run in some more cinder-specific job,
+        # especially because the tests fail at a high rate (see bugs 1483434,
+        # 1813217, 1745168)
+        c-bak: false
+        neutron-placement: true
+        neutron-qos: true
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-bionic
+    # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
+    # This job is prepared to make sure all stable branches from stable/stein till stable/train
+    # will keep running on bionic. This can be removed once stable/train is EOL.
+    branches:
+      - stable/stein
+      - stable/train
+      - stable/ussuri
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-xenial
+    # This job runs on Xenial and this is for stable/pike, stable/queens
+    # and stable/rocky. This job is prepared to make sure all stable branches
+    # before stable/stein will keep running on xenial. This job can be
+    # removed once stable/rocky is EOL.
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-slow
+    parent: tempest-multinode-full
+    description: |
+      This multinode integration job will run all the tests tagged as slow.
+      It enables the lvm multibackend setup to cover a few scenario tests.
+      This job will run only slow tests (API or Scenario) serially.
+      Former names for this job were:
+        * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
+        * tempest-scenario-multinode-lvm-multibackend
+    timeout: 10800
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+    vars:
+      tox_envlist: slow-serial
+      devstack_localrc:
+        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+        ENABLE_VOLUME_MULTIATTACH: true
+        # to avoid https://bugs.launchpad.net/neutron/+bug/1914037
+        # as we couldn't backport the fix to rocky and older releases
+        IPV6_PUBLIC_RANGE: 2001:db8:0:10::/64
+        IPV6_PUBLIC_NETWORK_GATEWAY: 2001:db8:0:10::2
+        IPV6_ROUTER_GW_IP: 2001:db8:0:10::1
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-placement: true
+        neutron-qos: true
+      tempest_concurrency: 2
+    group-vars:
+      # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+      # the controller and subnode prior to Rocky so we have to make sure the
+      # variable is set in both locations.
+      subnode:
+        devstack_localrc:
+          ENABLE_VOLUME_MULTIATTACH: true
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
new file mode 100644
index 0000000..5063d89
--- /dev/null
+++ b/zuul.d/tempest-specific.yaml
@@ -0,0 +1,131 @@
+# NOTE(gmann): This file includes all tempest specific jobs definition which
+# are supposed to be run by Tempest gate only.
+- job:
+    name: tempest-full-oslo-master
+    parent: tempest-full-py3
+    description: |
+      Integration test using current git of oslo libs.
+      This ensures that when oslo libs get released they
+      do not break OpenStack server projects.
+
+      Former name for this job was
+      periodic-tempest-dsvm-oslo-latest-full-master.
+    timeout: 10800
+    required-projects:
+      - opendev.org/openstack/oslo.cache
+      - opendev.org/openstack/oslo.concurrency
+      - opendev.org/openstack/oslo.config
+      - opendev.org/openstack/oslo.context
+      - opendev.org/openstack/oslo.db
+      - opendev.org/openstack/oslo.i18n
+      - opendev.org/openstack/oslo.log
+      - opendev.org/openstack/oslo.messaging
+      - opendev.org/openstack/oslo.middleware
+      - opendev.org/openstack/oslo.policy
+      - opendev.org/openstack/oslo.privsep
+      - opendev.org/openstack/oslo.reports
+      - opendev.org/openstack/oslo.rootwrap
+      - opendev.org/openstack/oslo.serialization
+      - opendev.org/openstack/oslo.service
+      - opendev.org/openstack/oslo.utils
+      - opendev.org/openstack/oslo.versionedobjects
+      - opendev.org/openstack/oslo.vmware
+
+- job:
+    name: tempest-full-parallel
+    parent: tempest-full-py3
+    voting: false
+    branches:
+      - master
+    description: |
+      Base integration test with Neutron networking.
+      It includes all scenario tests, as it did in the past.
+      This job runs all scenario tests in parallel!
+    timeout: 9000
+    vars:
+      tox_envlist: full-parallel
+      run_tempest_cleanup: true
+      run_tempest_dry_cleanup: true
+      devstack_localrc:
+        DEVSTACK_PARALLEL: True
+
+- job:
+    name: tempest-full-py3-ipv6
+    parent: devstack-tempest-ipv6
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Base integration test with Neutron networking, IPv6 and py3.
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        c-bak: false
+
+- job:
+    name: tempest-full-py3-opensuse15
+    parent: tempest-full-py3
+    nodeset: devstack-single-node-opensuse-15
+    description: |
+      Base integration test with Neutron networking and py36 running
+      on openSUSE Leap 15.x
+    voting: false
+
+- job:
+    name: tempest-tox-plugin-sanity-check
+    parent: tox
+    description: |
+      Run tempest plugin sanity check script using tox.
+    nodeset: ubuntu-focal
+    vars:
+      tox_envlist: plugin-sanity-check
+    timeout: 5000
+
+- job:
+    name: tempest-full-test-account-py3
+    parent: tempest-full-py3
+    description: |
+      This job runs the full set of tempest tests using pre-provisioned
+      credentials instead of dynamic credentials and py3.
+      Former names for this job were:
+        - legacy-tempest-dsvm-full-test-accounts
+        - legacy-tempest-dsvm-neutron-full-test-accounts
+        - legacy-tempest-dsvm-identity-v3-test-accounts
+    vars:
+      devstack_localrc:
+        TEMPEST_USE_TEST_ACCOUNTS: True
+
+- job:
+    name: tempest-full-test-account-no-admin-py3
+    parent: tempest-full-test-account-py3
+    description: |
+      This job runs the full set of tempest tests using pre-provisioned
+      credentials and py3 without having an admin account.
+      Former name for this job was:
+        - legacy-tempest-dsvm-neutron-full-non-admin
+
+    vars:
+      devstack_localrc:
+        TEMPEST_HAS_ADMIN: False
+
+- job:
+    name: tempest-stestr-master
+    parent: devstack-tempest
+    description: |
+      Smoke integration test with stestr master.
+      This ensures that a new stestr release does
+      not break Tempest.
+    vars:
+      tox_envlist: stestr-master
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false