Merge "Need to have stable implementation of apis"
diff --git a/.zuul.yaml b/.zuul.yaml
deleted file mode 100644
index 5e3f33a..0000000
--- a/.zuul.yaml
+++ /dev/null
@@ -1,772 +0,0 @@
-- job:
-    name: devstack-tempest
-    parent: devstack
-    description: |
-      Base Tempest job.
-
-      This Tempest job provides the base for both the single and multi-node
-      test setup. To run a multi-node test inherit from devstack-tempest and
-      set the nodeset to a multi-node one.
-    required-projects: &base_required-projects
-      - opendev.org/openstack/tempest
-    timeout: 7200
-    roles: &base_roles
-      - zuul: opendev.org/openstack/devstack
-    vars: &base_vars
-      # TODO(gmann): Remove these test skip once nova bug #1882521 is solved
-      tempest_black_regex: "(tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume|tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON|tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached)"
-      devstack_services:
-        tempest: true
-      devstack_local_conf:
-        test-config:
-          $TEMPEST_CONFIG:
-            compute:
-              min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
-      test_results_stage_name: test_results
-      zuul_copy_output:
-        '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
-        '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
-        '{{ devstack_base_dir }}/tempest/tempest.log': logs
-        '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
-        '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
-        '{{ stage_dir }}/stackviz': logs
-      extensions_to_txt:
-        conf: true
-        log: true
-        yaml: true
-        yml: true
-    run: playbooks/devstack-tempest.yaml
-    post-run: playbooks/post-tempest.yaml
-
-- job:
-    name: tempest-all
-    parent: devstack-tempest
-    description: |
-      Integration test that runs all tests.
-      Former name for this job was:
-        * legacy-periodic-tempest-dsvm-all-master
-    vars:
-      tox_envlist: all
-      tempest_test_regex: tempest
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-
-- job:
-    name: devstack-tempest-ipv6
-    parent: devstack-ipv6
-    description: |
-      Base Tempest IPv6 job. This job is derived from 'devstack-ipv6'
-      which set the IPv6-only setting for OpenStack services. As part of
-      run phase, this job will verify the IPv6 setting and check the services
-      endpoints and listen addresses are IPv6. Basically it will run the script
-      ./tool/verify-ipv6-only-deployments.sh
-
-      Child jobs of this job can run their own set of tests and can
-      add post-run playebooks to extend the IPv6 verification specific
-      to their deployed services.
-      Check the wiki page for more details about project jobs setup
-      - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
-    required-projects: *base_required-projects
-    timeout: 7200
-    roles: *base_roles
-    vars: *base_vars
-    run: playbooks/devstack-tempest-ipv6.yaml
-    post-run: playbooks/post-tempest.yaml
-
-- job:
-    name: tempest-ipv6-only
-    parent: devstack-tempest-ipv6
-    # This currently works from stable/pike on.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Integration test of IPv6-only deployments. This job runs
-      smoke and IPv6 relates tests only. Basic idea is to test
-      whether OpenStack Services listen on IPv6 addrress or not.
-    timeout: 10800
-    vars:
-      tox_envlist: ipv6-only
-
-- job:
-    name: tempest-full
-    parent: devstack-tempest
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking and py27.
-      Former names for this job where:
-        * legacy-tempest-dsvm-neutron-full
-        * gate-tempest-dsvm-neutron-full-ubuntu-xenial
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        USE_PYTHON3: False
-      devstack_services:
-        # NOTE(mriedem): Disable the cinder-backup service from tempest-full
-        # since tempest-full is in the integrated-gate project template but
-        # the backup tests do not really involve other services so they should
-        # be run in some more cinder-specific job, especially because the
-        # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
-        c-bak: false
-
-- job:
-    name: tempest-full-oslo-master
-    parent: tempest-full
-    description: |
-      Integration test using current git of oslo libs.
-      This ensures that when oslo libs get released that they
-      do not break OpenStack server projects.
-
-      Former name for this job was
-      periodic-tempest-dsvm-oslo-latest-full-master.
-    timeout: 10800
-    required-projects:
-      - opendev.org/openstack/oslo.cache
-      - opendev.org/openstack/oslo.concurrency
-      - opendev.org/openstack/oslo.config
-      - opendev.org/openstack/oslo.context
-      - opendev.org/openstack/oslo.db
-      - opendev.org/openstack/oslo.i18n
-      - opendev.org/openstack/oslo.log
-      - opendev.org/openstack/oslo.messaging
-      - opendev.org/openstack/oslo.middleware
-      - opendev.org/openstack/oslo.policy
-      - opendev.org/openstack/oslo.privsep
-      - opendev.org/openstack/oslo.reports
-      - opendev.org/openstack/oslo.rootwrap
-      - opendev.org/openstack/oslo.serialization
-      - opendev.org/openstack/oslo.service
-      - opendev.org/openstack/oslo.utils
-      - opendev.org/openstack/oslo.versionedobjects
-      - opendev.org/openstack/oslo.vmware
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: True
-
-- job:
-    name: tempest-full-parallel
-    parent: tempest-full-py3
-    voting: false
-    branches:
-      - master
-    description: |
-      Base integration test with Neutron networking.
-      It includes all scenarios as it was in the past.
-      This job runs all scenario tests in parallel!
-    timeout: 9000
-    vars:
-      tox_envlist: full-parallel
-      run_tempest_cleanup: true
-      run_tempest_dry_cleanup: true
-
-- job:
-    name: tempest-full-py3
-    parent: devstack-tempest
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking and py3.
-      Former names for this job where:
-        * legacy-tempest-dsvm-py35
-        * gate-tempest-dsvm-py35
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        GLANCE_USE_IMPORT_WORKFLOW: True
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        # NOTE(mriedem): Disable the cinder-backup service from
-        # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
-        # project template but the backup tests do not really involve other
-        # services so they should be run in some more cinder-specific job,
-        # especially because the tests fail at a high rate (see bugs 1483434,
-        # 1813217, 1745168)
-        c-bak: false
-
-- job:
-    name: tempest-integrated-networking
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This  job runs integration tests for networking. This is subset of
-      'tempest-full' job and run only Neutron and Nova related tests.
-      This is meant to be run on neutron gate only.
-    vars:
-      tox_envlist: integrated-network
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-compute
-    parent: devstack-tempest
-    nodeset: openstack-single-node-bionic
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for compute. This is
-      subset of 'tempest-full' job and run Nova, Neutron, Cinder (except backup tests)
-      and Glance related tests. This is meant to be run on Nova gate only.
-    vars:
-      tox_envlist: integrated-compute
-      tempest_black_regex: ""
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-placement
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for placement. This is
-      subset of 'tempest-full' job and run Nova and Neutron
-      related tests. This is meant to be run on Placement gate only.
-    vars:
-      tox_envlist: integrated-placement
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        c-bak: false
-
-- job:
-    name: tempest-integrated-storage
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for image & block storage. This is
-      subset of 'tempest-full' job and run Cinder, Glance, Swift and Nova
-      related tests. This is meant to be run on Cinder and Glance gate only.
-    vars:
-      tox_envlist: integrated-storage
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        GLANCE_USE_IMPORT_WORKFLOW: True
-
-- job:
-    name: tempest-integrated-object-storage
-    parent: devstack-tempest
-    branches: ^(?!stable/ocata).*$
-    description: |
-      This job runs integration tests for object storage. This is
-      subset of 'tempest-full' job and run Swift, Cinder and Glance
-      related tests. This is meant to be run on Swift gate only.
-    vars:
-      tox_envlist: integrated-object-storage
-      devstack_localrc:
-        # NOTE(gmann): swift is not ready on python3 yet and devstack
-        # install it on python2.7 only. But settting the USE_PYTHON3
-        # for future once swift is ready on py3.
-        USE_PYTHON3: true
-
-- job:
-    name: tempest-full-py3-ipv6
-    parent: devstack-tempest-ipv6
-    # This currently works from stable/pike on.
-    # Before stable/pike, legacy version of tempest-full
-    # 'legacy-tempest-dsvm-neutron-full' run.
-    branches: ^(?!stable/ocata).*$
-    description: |
-      Base integration test with Neutron networking, IPv6 and py3.
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        c-bak: false
-
-- job:
-    name: tempest-multinode-full-base
-    parent: devstack-tempest
-    description: |
-      Base multinode integration test with Neutron networking and py27.
-      Former names for this job were:
-        * neutron-tempest-multinode-full
-        * legacy-tempest-dsvm-neutron-multinode-full
-        * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
-      This job includes two nodes, controller / tempest plus a subnode, but
-      it can be used with different topologies, as long as a controller node
-      and a tempest one exist.
-    timeout: 10800
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        FORCE_CONFIG_DRIVE: false
-        NOVA_ALLOW_MOVE_TO_SAME_HOST: false
-        LIVE_MIGRATION_AVAILABLE: true
-        USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-    group-vars:
-      peers:
-        devstack_localrc:
-          NOVA_ALLOW_MOVE_TO_SAME_HOST: false
-          LIVE_MIGRATION_AVAILABLE: true
-          USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-
-- job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-focal
-    # This job runs on Focal from stable/victoria on.
-    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-bionic
-    # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
-    # This job is prepared to make sure all stable branches from stable/stein till stable/train
-    # will keep running on bionic. This can be removed once stable/train is EOL.
-    branches:
-      - stable/stein
-      - stable/train
-      - stable/ussuri
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-xenial
-    # This job runs on Xenial and this is for stable/pike, stable/queens
-    # and stable/rocky. This job is prepared to make sure all stable branches
-    # before stable/stein will keep running on xenial. This job can be
-    # removed once stable/rocky is EOL.
-    branches:
-      - stable/pike
-      - stable/queens
-      - stable/rocky
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
-    name: tempest-multinode-full-py3
-    parent: tempest-multinode-full
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: true
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: true
-
-- job:
-    name: tempest-full-py3-opensuse15
-    parent: tempest-full-py3
-    nodeset: devstack-single-node-opensuse-15
-    description: |
-      Base integration test with Neutron networking and py36 running
-      on openSUSE Leap 15.x
-    voting: false
-
-- job:
-    name: tempest-slow
-    parent: tempest-multinode-full
-    description: |
-      This multinode integration job will run all the tests tagged as slow.
-      It enables the lvm multibackend setup to cover few scenario tests.
-      This job will run only slow tests (API or Scenario) serially.
-
-      Former names for this job were:
-        * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
-        * tempest-scenario-multinode-lvm-multibackend
-    timeout: 10800
-    vars:
-      tox_envlist: slow-serial
-      devstack_localrc:
-        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
-        ENABLE_VOLUME_MULTIATTACH: true
-      devstack_plugins:
-        neutron: https://opendev.org/openstack/neutron
-      devstack_services:
-        neutron-placement: true
-        neutron-qos: true
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
-      tempest_concurrency: 2
-    group-vars:
-      # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
-      # the controller and subnode prior to Rocky so we have to make sure the
-      # variable is set in both locations.
-      subnode:
-        devstack_localrc:
-          ENABLE_VOLUME_MULTIATTACH: true
-
-- job:
-    name: tempest-slow-py3
-    parent: tempest-slow
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        c-bak: false
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: true
-
-- job:
-    name: tempest-full-ussuri-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/ussuri
-
-- job:
-    name: tempest-full-train-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/train
-
-- job:
-    name: tempest-full-stein-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/stein
-
-- job:
-    name: tempest-tox-plugin-sanity-check
-    parent: tox
-    description: |
-      Run tempest plugin sanity check script using tox.
-    nodeset: ubuntu-focal
-    vars:
-      tox_envlist: plugin-sanity-check
-    timeout: 5000
-
-- job:
-    name: tempest-cinder-v2-api
-    parent: devstack-tempest
-    branches:
-      - master
-    description: |
-      This job runs the cinder API test against v2 endpoint.
-    vars:
-      tox_envlist: all
-      tempest_test_regex: api.*volume
-      devstack_localrc:
-        TEMPEST_VOLUME_TYPE: volumev2
-
-- job:
-    name: tempest-full-test-account-py3
-    parent: tempest-full-py3
-    description: |
-      This job runs the full set of tempest tests using pre-provisioned
-      credentials instead of dynamic credentials and py3.
-      Former names for this job were:
-        - legacy-tempest-dsvm-full-test-accounts
-        - legacy-tempest-dsvm-neutron-full-test-accounts
-        - legacy-tempest-dsvm-identity-v3-test-accounts
-    vars:
-      devstack_localrc:
-        TEMPEST_USE_TEST_ACCOUNTS: True
-
-- job:
-    name: tempest-full-test-account-no-admin-py3
-    parent: tempest-full-test-account-py3
-    description: |
-      This job runs the full set of tempest tests using pre-provisioned
-      credentials and py3 without having an admin account.
-      Former name for this job was:
-        - legacy-tempest-dsvm-neutron-full-non-admin
-
-    vars:
-      devstack_localrc:
-        TEMPEST_HAS_ADMIN: False
-
-- job:
-    name: tempest-pg-full
-    parent: tempest-full
-    description: |
-      Base integration test with Neutron networking and PostgreSQL.
-      Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
-    vars:
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-        DATABASE_TYPE: postgresql
-        USE_PYTHON3: True
-
-- project-template:
-    name: integrated-gate-networking
-    description: |
-      Run the python3 Tempest network integration tests (Nova and Neutron related)
-      in check and gate for the neutron integrated gate. This is meant to be
-      run on neutron gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-networking
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-networking
-
-- project-template:
-    name: integrated-gate-compute
-    description: |
-      Run the python3 Tempest compute integration tests
-      (Nova, Neutron, Cinder and Glance related) in check and gate
-      for the Nova integrated gate. This is meant to be
-      run on Nova gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-compute
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-compute
-
-- project-template:
-    name: integrated-gate-placement
-    description: |
-      Run the python3 Tempest placement integration tests
-      (Nova and Neutron related) in check and gate
-      for the Placement integrated gate. This is meant to be
-      run on Placement gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-placement
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-placement
-
-- project-template:
-    name: integrated-gate-storage
-    description: |
-      Run the python3 Tempest image & block storage integration tests
-      (Cinder, Glance, Swift and Nova related) in check and gate
-      for the neutron integrated gate. This is meant to be
-      run on Cinder and Glance gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-storage
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-storage
-
-- project-template:
-    name: integrated-gate-object-storage
-    description: |
-      Run the python3 Tempest object storage integration tests
-      (Swift, Cinder and Glance related) in check and gate
-      for the swift integrated gate. This is meant to be
-      run on swift gate only.
-    check:
-      jobs:
-        - grenade
-        - tempest-integrated-object-storage
-    gate:
-      jobs:
-        - grenade
-        - tempest-integrated-object-storage
-
-- project:
-    templates:
-      - check-requirements
-      - integrated-gate-py3
-      - openstack-cover-jobs
-      - openstack-python3-victoria-jobs
-      - publish-openstack-docs-pti
-      - release-notes-jobs-python3
-    check:
-      jobs:
-        - devstack-tempest:
-            files:
-              - ^playbooks/
-              - ^roles/
-              - ^.zuul.yaml$
-        - devstack-tempest-ipv6:
-            voting: false
-            files:
-              - ^playbooks/
-              - ^roles/
-              - ^.zuul.yaml$
-        - tempest-full-parallel:
-            # Define list of irrelevant files to use everywhere else
-            irrelevant-files: &tempest-irrelevant-files
-              - ^.*\.rst$
-              - ^doc/.*$
-              - ^etc/.*$
-              - ^releasenotes/.*$
-              - ^setup.cfg$
-              - ^tempest/hacking/.*$
-              - ^tempest/tests/.*$
-              - ^tools/.*$
-              - ^.coveragerc$
-              - ^.gitignore$
-              - ^.gitreview$
-              - ^.mailmap$
-        - tempest-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3-ipv6:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-ussuri-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-train-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-stein-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-multinode-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-tox-plugin-sanity-check:
-            irrelevant-files: &tempest-irrelevant-files-2
-              - ^.*\.rst$
-              - ^doc/.*$
-              - ^etc/.*$
-              - ^releasenotes/.*$
-              - ^setup.cfg$
-              - ^tempest/hacking/.*$
-              - ^tempest/tests/.*$
-              - ^.coveragerc$
-              - ^.gitignore$
-              - ^.gitreview$
-              - ^.mailmap$
-              # tools/ is not here since this relies on a script in tools/.
-        - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
-        - tempest-slow-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - nova-live-migration:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - devstack-plugin-ceph-tempest-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
-            irrelevant-files: *tempest-irrelevant-files
-        - grenade:
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario001-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario002-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario003-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - puppet-openstack-integration-4-scenario004-tempest-centos-7:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr:
-            irrelevant-files: *tempest-irrelevant-files
-        - interop-tempest-consistency:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-test-account-py3:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-test-account-no-admin-py3:
-            voting: false
-            irrelevant-files: *tempest-irrelevant-files
-        - openstack-tox-bashate:
-            irrelevant-files: *tempest-irrelevant-files-2
-    gate:
-      jobs:
-        - tempest-slow-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-grenade-multinode:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - grenade:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-ipv6-only:
-            irrelevant-files: *tempest-irrelevant-files-2
-        - devstack-plugin-ceph-tempest-py3:
-            irrelevant-files: *tempest-irrelevant-files
-    experimental:
-      jobs:
-        - tempest-cinder-v2-api:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-all:
-            irrelevant-files: *tempest-irrelevant-files
-        - neutron-tempest-dvr-ha-multinode-full:
-            irrelevant-files: *tempest-irrelevant-files
-        - nova-tempest-v2-api:
-            irrelevant-files: *tempest-irrelevant-files
-        - cinder-tempest-lvm-multibackend:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-pg-full:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-py3-opensuse15:
-            irrelevant-files: *tempest-irrelevant-files
-    periodic-stable:
-      jobs:
-        - tempest-full-ussuri-py3
-        - tempest-full-train-py3
-        - tempest-full-stein-py3
-    periodic:
-      jobs:
-        - tempest-all
-        - tempest-full-oslo-master
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 5bc0eac..c7004dd 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -352,15 +352,15 @@
 
   * `2.37`_
 
-  .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id34
+  .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id35
 
   * `2.39`_
 
-  .. _2.39: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id35
+  .. _2.39: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id36
 
   * `2.41`_
 
-  .. _2.41: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id37
+  .. _2.41: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id38
 
   * `2.42`_
 
@@ -368,15 +368,15 @@
 
   * `2.47`_
 
-  .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42
+  .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
 
   * `2.48`_
 
-  .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
+  .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
 
   * `2.49`_
 
-  .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
+  .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id45
 
   * `2.53`_
 
@@ -384,15 +384,15 @@
 
   * `2.54`_
 
-  .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id49
+  .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
 
   * `2.55`_
 
-  .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
+  .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id51
 
   * `2.57`_
 
-  .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id52
+  .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id53
 
   * `2.59`_
 
@@ -404,19 +404,19 @@
 
   * `2.61`_
 
-  .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id55
+  .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id56
 
   * `2.63`_
 
-  .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id57
+  .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id58
 
   * `2.70`_
 
-  .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id63
+  .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
 
   * `2.71`_
 
-  .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
+  .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id65
 
   * `2.73`_
 
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index 5f72345..4788362 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -7,11 +7,6 @@
 
 # We run tests only on one node, regardless how many nodes are in the system
 - hosts: tempest
-  environment:
-    # This enviroment variable is used by the optional tempest-gabbi
-    # job provided by the gabbi-tempest plugin. It can be safely ignored
-    # if that plugin is not being used.
-    GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
   roles:
     - setup-tempest-run-dir
     - setup-tempest-data-dir
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 7ee7411..3b969f2 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -7,11 +7,6 @@
 
 # We run tests only on one node, regardless how many nodes are in the system
 - hosts: tempest
-  environment:
-    # This enviroment variable is used by the optional tempest-gabbi
-    # job provided by the gabbi-tempest plugin. It can be safely ignored
-    # if that plugin is not being used.
-    GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
   tasks:
     - name: Setup Tempest Run Directory
       include_role:
@@ -30,9 +25,9 @@
         name: tempest-cleanup
       vars:
         init_saved_state: true
-      when:
-        - run_tempest_dry_cleanup is defined
-        - run_tempest_cleanup is defined
+      when: (run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool) or
+            (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
+            (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
 
     - name: Run Tempest
       include_role:
@@ -43,10 +38,9 @@
         name: tempest-cleanup
       vars:
         dry_run: true
-      when:
-        - run_tempest_dry_cleanup is defined
+      when: run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool
 
     - name: Run tempest cleanup
       include_role:
         name: tempest-cleanup
-      when: run_tempest_cleanup is defined
+      when: run_tempest_cleanup is defined and run_tempest_cleanup | bool
diff --git a/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml b/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml
new file mode 100644
index 0000000..fb84d25
--- /dev/null
+++ b/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    The test_reboot_server_soft test, which had been skipped for more than
+    6 years, has been removed. Note that the minimum scenario test uses
+    soft reboot and the Nova functional tests also cover reboot.
diff --git a/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
new file mode 100644
index 0000000..121e060
--- /dev/null
+++ b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add a new config option ``[compute-feature-enabled] shelve_migrate``
+    which enables tests for environments that support cold migration of
+    qcow2 unshelved instances.
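
As an illustrative sketch only (the job name is hypothetical), a Zuul job could enable the new option through the ``devstack_local_conf`` test-config pattern used by the Tempest jobs shown earlier in this diff:

- job:
    name: tempest-shelve-migrate-example
    parent: devstack-tempest
    vars:
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            compute-feature-enabled:
              # assumes the deployment can cold migrate qcow2
              # unshelved instances
              shelve_migrate: true
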
diff --git a/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml b/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml
new file mode 100644
index 0000000..159bbe8
--- /dev/null
+++ b/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - A new config option in the validation section, image_alt_ssh_user,
+    to specify the user name used to authenticate to an alternative
+    instance (instance using image_ref_alt) in tests. By default this
+    is set to root.
+  - A new config option in the validation section, image_alt_ssh_password,
+    to specify the password used to authenticate to an alternative
+    instance (instance using image_ref_alt) in tests. By default this
+    is set to password.
diff --git a/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml b/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml
new file mode 100644
index 0000000..2cd5af6
--- /dev/null
+++ b/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add the show type API to the v3 types_client library:
+
+    * default_volume_type
diff --git a/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml b/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml
new file mode 100644
index 0000000..1f2d6b9
--- /dev/null
+++ b/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    New config option in ``network-feature-enabled``: ``available_features``.
+    This is a list which can contain features that are not discoverable
+    through the Neutron API, or it can be the special entry ``all``.
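
As a hedged sketch only, the corresponding test-config fragment (reusing the ``devstack_local_conf`` pattern from the Zuul jobs shown earlier in this diff) could look like:

$TEMPEST_CONFIG:
  network-feature-enabled:
    # either a list of feature names or the special entry 'all'
    available_features: all
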
diff --git a/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml b/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml
new file mode 100644
index 0000000..9e6d49a
--- /dev/null
+++ b/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    Add ``placement`` API methods for testing the Routed Provider Networks
+    feature. The following API calls are now available to tempest in the new
+    resource_providers_client:
+
+    * GET /resource_providers
+    * GET /resource_providers/{uuid}
+    * GET /resource_providers/{uuid}/inventories
+    * GET /resource_providers/{uuid}/aggregates
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
index 70719ca..d1fad90 100644
--- a/roles/tempest-cleanup/README.rst
+++ b/roles/tempest-cleanup/README.rst
@@ -31,3 +31,31 @@
    When true, tempest cleanup creates a report (./dry_run.json) of the
    resources that would be cleaned up if the role was ran with dry_run option
    set to false.
+
+.. zuul:rolevar:: run_tempest_fail_if_leaked_resources
+   :default: false
+
+   When true, the role will fail if any leaked resources are detected.
+   The detection is done via the dry_run.json file: if it contains any
+   resources, some must have been leaked. This can also be used to verify
+   that tempest cleanup was successful.
+
+
+Role usage
+----------
+
+The role can also be used to verify that tempest tests don't leak any
+resources, or to check that the 'tempest cleanup' command deleted all leaked
+resources as expected.
+Either way, the role first needs to be run with the init_saved_state variable
+set to true before any tempest tests are executed.
+Then, after the tempest tests have run, this role needs to be run again with
+the role variables set according to the desired outcome:
+
+1. to verify that tempest tests don't leak any resources,
+   run_tempest_dry_cleanup and run_tempest_fail_if_leaked_resources have to
+   be set to true.
+
+2. to check that the 'tempest cleanup' command deleted all the leaked
+   resources, run_tempest_cleanup and run_tempest_fail_if_leaked_resources
+   have to be set to true.
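
As a minimal sketch of case 1 above (the job name is hypothetical), a Zuul job can set the variables that drive this role through playbooks/devstack-tempest.yaml, mirroring how tempest-full-parallel wires up the cleanup variables in the .zuul.yaml shown earlier in this diff:

- job:
    name: tempest-leak-check-example
    parent: tempest-full-py3
    vars:
      # triggers the dry-run cleanup task in playbooks/devstack-tempest.yaml
      run_tempest_dry_cleanup: true
      # makes the tempest-cleanup role fail if dry_run.json lists any
      # leaked resources
      run_tempest_fail_if_leaked_resources: true
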
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
index fc1948a..ce78bdb 100644
--- a/roles/tempest-cleanup/defaults/main.yaml
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -1,3 +1,4 @@
 devstack_base_dir: /opt/stack
 init_saved_state: false
 dry_run: false
+run_tempest_fail_if_leaked_resources: false
diff --git a/roles/tempest-cleanup/tasks/dry_run.yaml b/roles/tempest-cleanup/tasks/dry_run.yaml
new file mode 100644
index 0000000..46749ab
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/dry_run.yaml
@@ -0,0 +1,7 @@
+---
+- name: Run tempest cleanup dry-run
+  become: yes
+  become_user: tempest
+  command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
+  args:
+    chdir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/tempest-cleanup/tasks/dry_run_checker.py b/roles/tempest-cleanup/tasks/dry_run_checker.py
new file mode 100644
index 0000000..9cd9a85
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/dry_run_checker.py
@@ -0,0 +1,71 @@
+# Copyright 2020 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility for content checking of a given dry_run.json file.
+"""
+
+import argparse
+import json
+import sys
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(__doc__)
+    parser.add_argument('--is-empty', action="store_true", dest='is_empty',
+                        default=False,
+                        help="""Are values of a given dry_run.json empty?""")
+    parser.add_argument('--file', dest='file', default=None, metavar='PATH',
+                        help="A path to a dry_run.json file.")
+    return parser
+
+
+def parse_arguments():
+    parser = get_parser()
+    args = parser.parse_args()
+    if not args.file:
+        sys.stderr.write('Path to a dry_run.json must be specified.\n')
+        sys.exit(1)
+    return args
+
+
+def load_json(path):
+    """Load json content from file addressed by path."""
+    try:
+        with open(path, 'rb') as json_file:
+            json_data = json.load(json_file)
+    except Exception as ex:
+        sys.exit(ex)
+    return json_data
+
+
+def are_values_empty(dry_run_content):
+    """Return true if values of dry_run.json are empty."""
+    for value in dry_run_content.values():
+        if value:
+            return False
+    return True
+
+
+def main():
+    args = parse_arguments()
+    content = load_json(args.file)
+    if args.is_empty:
+        if not are_values_empty(content):
+            sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
index 5444afc..c1d63f0 100644
--- a/roles/tempest-cleanup/tasks/main.yaml
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -12,20 +12,35 @@
 
 - when: dry_run
   block:
-    - name: Run tempest cleanup dry-run
-      become: yes
-      become_user: tempest
-      command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
-      args:
-        chdir: "{{ devstack_base_dir }}/tempest"
+    - import_tasks: dry_run.yaml
 
     - name: Cat dry_run.json
       command: cat "{{ devstack_base_dir }}/tempest/dry_run.json"
 
-- name: Run tempest cleanup
-  become: yes
-  become_user: tempest
-  command: tox -evenv-tempest -- tempest cleanup --debug
-  args:
-    chdir: "{{ devstack_base_dir }}/tempest"
-  when: not dry_run and not init_saved_state
+- when:
+    - not dry_run
+    - not init_saved_state
+  block:
+    - name: Run tempest cleanup
+      become: yes
+      become_user: tempest
+      command: tox -evenv-tempest -- tempest cleanup --debug
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
+
+- when:
+    - run_tempest_fail_if_leaked_resources
+    - not init_saved_state
+  block:
+    # let's run dry run again, if haven't already, to check no leftover
+    # resources were left behind after the cleanup in the previous task
+    - import_tasks: dry_run.yaml
+      when: not dry_run
+
+    - name: Fail if any resources are leaked
+      become: yes
+      become_user: tempest
+      shell: |
+        python3 roles/tempest-cleanup/tasks/dry_run_checker.py --file {{ devstack_base_dir }}/tempest/dry_run.json --is-empty
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
new file mode 100644
index 0000000..9340997
--- /dev/null
+++ b/tempest/api/compute/admin/test_volume.py
@@ -0,0 +1,118 @@
+# Copyright 2020 Red Hat Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import six
+
+from tempest.api.compute import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class BaseAttachSCSIVolumeTest(base.BaseV2ComputeAdminTest):
+    """Base class for the admin volume tests in this module."""
+    create_default_network = True
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaseAttachSCSIVolumeTest, cls).skip_checks()
+        if not CONF.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(BaseAttachSCSIVolumeTest, cls).setup_credentials()
+
+    def _create_image_with_custom_property(self, **kwargs):
+        """Wrapper utility that returns the custom image.
+
+        Creates a new image by downloading the default image's bits and
+        uploading them to a new image. Any kwargs are set as image properties
+        on the new image.
+
+        :return: The UUID of the newly created image.
+        """
+        image = self.image_client.show_image(CONF.compute.image_ref)
+        image_data = self.image_client.show_image_file(
+            CONF.compute.image_ref).data
+        image_file = six.BytesIO(image_data)
+        create_dict = {
+            'container_format': image['container_format'],
+            'disk_format': image['disk_format'],
+            'min_disk': image['min_disk'],
+            'min_ram': image['min_ram'],
+            'visibility': 'public',
+        }
+        create_dict.update(kwargs)
+        new_image = self.image_client.create_image(**create_dict)
+        self.addCleanup(self.image_client.wait_for_resource_deletion,
+                        new_image['id'])
+        self.addCleanup(self.image_client.delete_image, new_image['id'])
+        self.image_client.store_image_file(new_image['id'], image_file)
+
+        return new_image['id']
+
+
+class AttachSCSIVolumeTestJSON(BaseAttachSCSIVolumeTest):
+    """Test attaching scsi volume to server"""
+
+    @decorators.idempotent_id('777e468f-17ca-4da4-b93d-b7dbf56c0494')
+    def test_attach_scsi_disk_with_config_drive(self):
+        """Test the attach/detach volume with config drive/scsi disk
+
+        Enable the config drive, boot an instance from an image with the
+        meta properties hw_cdrom_bus: scsi and hw_scsi_model: virtio-scsi,
+        then assert the list of volume attachments on the instance after
+        attaching and detaching a volume.
+        """
+        custom_img = self._create_image_with_custom_property(
+            hw_scsi_model='virtio-scsi',
+            hw_disk_bus='scsi',
+            hw_cdrom_bus='scsi')
+        server = self.create_test_server(image_id=custom_img,
+                                         config_drive=True,
+                                         wait_until='ACTIVE')
+
+        # NOTE(lyarwood): self.create_test_server deletes the server
+        # during class-level cleanup, so add a server cleanup here to
+        # ensure that the instance is deleted before the created image.
+        # This avoids failures when the rbd backend is used for both
+        # Glance and Nova ephemeral storage. Also wait until the server
+        # is deleted, otherwise image deletion can start before the
+        # server is deleted.
+        self.addCleanup(waiters.wait_for_server_termination,
+                        self.servers_client, server['id'])
+        self.addCleanup(self.servers_client.delete_server, server['id'])
+
+        volume = self.create_volume()
+        attachment = self.attach_volume(server, volume)
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, attachment['volumeId'], 'in-use')
+        volume_after_attach = self.servers_client.list_volume_attachments(
+            server['id'])['volumeAttachments']
+        self.assertEqual(1, len(volume_after_attach),
+                         "Failed to attach volume")
+        self.servers_client.detach_volume(
+            server['id'], attachment['volumeId'])
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, attachment['volumeId'], 'available')
+        volume_after_detach = self.servers_client.list_volume_attachments(
+            server['id'])['volumeAttachments']
+        self.assertEqual(0, len(volume_after_detach),
+                         "Failed to detach volume")
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 8b847fc..bb0f5ad 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -171,8 +171,11 @@
         cls.flavor_ref = CONF.compute.flavor_ref
         cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
         cls.ssh_user = CONF.validation.image_ssh_user
+        cls.ssh_alt_user = CONF.validation.image_alt_ssh_user
         cls.image_ssh_user = CONF.validation.image_ssh_user
+        cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user
         cls.image_ssh_password = CONF.validation.image_ssh_password
+        cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password
 
     @classmethod
     def is_requested_microversion_compatible(cls, max_version):
@@ -634,6 +637,7 @@
             cls.os_admin.availability_zone_client)
         cls.admin_flavors_client = cls.os_admin.flavors_client
         cls.admin_servers_client = cls.os_admin.servers_client
+        cls.image_client = cls.os_admin.image_client_v2
 
     def create_flavor(self, ram, vcpus, disk, name=None,
                       is_public='True', **kwargs):
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 59848f6..3c4daf6 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -35,16 +35,16 @@
         cls.from_port = 22
         cls.to_port = 22
 
-    def setUp(cls):
-        super(SecurityGroupRulesTestJSON, cls).setUp()
+    def setUp(self):
+        super(SecurityGroupRulesTestJSON, self).setUp()
 
-        from_port = cls.from_port
-        to_port = cls.to_port
+        from_port = self.from_port
+        to_port = self.to_port
         group = {}
         ip_range = {}
-        cls.expected = {
+        self.expected = {
             'parent_group_id': None,
-            'ip_protocol': cls.ip_protocol,
+            'ip_protocol': self.ip_protocol,
             'from_port': from_port,
             'to_port': to_port,
             'ip_range': ip_range,
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index a7e2187..58d4d7d 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -338,7 +338,9 @@
         found_devices = [d['tags'][0] for d in md_dict['devices']
                          if d.get('tags')]
         try:
-            self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+            self.assertEqual(
+                sorted(found_devices),
+                sorted(['nic-tag', 'volume-tag']))
             return True
         except Exception:
             return False
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 7931ca9..6ebdbdb 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -26,11 +26,6 @@
 
 CONF = config.CONF
 
-if six.PY2:
-    ord_func = ord
-else:
-    ord_func = int
-
 
 class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
     """Test novnc console"""
@@ -116,14 +111,14 @@
             # single word(4 bytes).
             self.assertEqual(
                 data_length, 4, 'Expected authentication type None.')
-            self.assertIn(1, [ord_func(data[i]) for i in (0, 3)],
+            self.assertIn(1, [int(data[i]) for i in (0, 3)],
                           'Expected authentication type None.')
         else:
             self.assertGreaterEqual(
                 len(data), 2, 'Expected authentication type None.')
             self.assertIn(
                 1,
-                [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
+                [int(data[i + 1]) for i in range(int(data[0]))],
                 'Expected authentication type None.')
             # Send to the server that we only support authentication
             # type None
@@ -136,7 +131,7 @@
                 len(data), 4,
                 'Server did not think security was successful.')
             self.assertEqual(
-                [ord_func(i) for i in data], [0, 0, 0, 0],
+                [int(i) for i in data], [0, 0, 0, 0],
                 'Server did not think security was successful.')
 
         # Say to leave the desktop as shared as part of client initialization
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 4db6987..4527aa9 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -160,15 +160,6 @@
         """
         self._test_reboot_server('HARD')
 
-    @decorators.skip_because(bug="1014647")
-    @decorators.idempotent_id('4640e3ef-a5df-482e-95a1-ceeeb0faa84d')
-    def test_reboot_server_soft(self):
-        """Test soft rebooting server
-
-        The server should be signaled to reboot gracefully.
-        """
-        self._test_reboot_server('SOFT')
-
     @decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
     def test_remove_server_all_security_groups(self):
         """Test removing all security groups from server"""
@@ -237,7 +228,7 @@
             # 4.Plain username/password auth, if a password was given.
             linux_client = remote_client.RemoteClient(
                 self.get_server_ip(rebuilt_server, validation_resources),
-                self.ssh_user,
+                self.ssh_alt_user,
                 password,
                 validation_resources['keypair']['private_key'],
                 server=rebuilt_server,
@@ -319,7 +310,7 @@
                 self.os_primary)
             linux_client = remote_client.RemoteClient(
                 self.get_server_ip(server, validation_resources),
-                self.ssh_user,
+                self.ssh_alt_user,
                 password=None,
                 pkey=validation_resources['keypair']['private_key'],
                 server=server,
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index a697b95..655909c 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -45,7 +45,7 @@
         # Attempt to start a server with a meta-data key that is > 255
         # characters
 
-        # Tryset_server_metadata_item a few values
+        # Try to create a server with oversized metadata keys of a few sizes
         for sz in [256, 257, 511, 1023]:
             key = "k" * sz
             meta = {key: 'data1'}
@@ -86,11 +86,15 @@
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('0025fbd6-a4ba-4cde-b8c2-96805dcfdabc')
-    def test_wrong_key_passed_in_body(self):
+    def test_set_metadata_invalid_key(self):
         """Test setting server metadata item with wrong key in body
 
         Raise BadRequest if key in uri does not match the key passed in body.
         """
+        if not CONF.compute_feature_enabled.xenapi_apis:
+            raise self.skipException(
+                'Metadata is read-only on non-Xen-based deployments.')
+
         meta = {'testkey': 'testvalue'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata_item,
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index ba2adbb..8a05e7a 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -29,6 +29,7 @@
 
 class ServerPersonalityTestJSON(base.BaseV2ComputeTest):
     """Test servers with injected files"""
+    max_microversion = '2.56'
 
     @classmethod
     def setup_credentials(cls):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 5445113..c222893 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
@@ -189,6 +190,7 @@
         self._test_stable_device_rescue(server_id, rescue_image_id)
 
     @decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
+    @utils.services('volume')
     def test_stable_device_rescue_disk_virtio_with_volume_attached(self):
         """Test rescuing server with volume attached
 
@@ -214,6 +216,13 @@
 
     min_microversion = '2.87'
 
+    @classmethod
+    def skip_checks(cls):
+        super(ServerBootFromVolumeStableRescueTest, cls).skip_checks()
+        if not CONF.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
     @decorators.attr(type='slow')
     @decorators.idempotent_id('48f123cb-922a-4065-8db6-b9a9074a556b')
     def test_stable_device_rescue_bfv_blank_volume(self):
diff --git a/tempest/api/identity/admin/v3/test_application_credentials.py b/tempest/api/identity/admin/v3/test_application_credentials.py
index c9cafd8..f5b0356 100644
--- a/tempest/api/identity/admin/v3/test_application_credentials.py
+++ b/tempest/api/identity/admin/v3/test_application_credentials.py
@@ -37,7 +37,7 @@
         secret = app_cred['secret']
 
         # Check that the application credential is functional
-        token_id, resp = self.non_admin_token.get_token(
+        _, resp = self.non_admin_token.get_token(
             app_cred_id=app_cred['id'],
             app_cred_secret=secret,
             auth_data=True
diff --git a/tempest/api/identity/v3/test_application_credentials.py b/tempest/api/identity/v3/test_application_credentials.py
index 77ad720..06734aa 100644
--- a/tempest/api/identity/v3/test_application_credentials.py
+++ b/tempest/api/identity/v3/test_application_credentials.py
@@ -51,7 +51,7 @@
         self.assertNotIn('secret', app_cred)
 
         # Check that the application credential is functional
-        token_id, resp = self.non_admin_token.get_token(
+        _, resp = self.non_admin_token.get_token(
             app_cred_id=app_cred['id'],
             app_cred_secret=secret,
             auth_data=True
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index 7e13d7f..ad68d82 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -13,10 +13,16 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import six
+
 from tempest.api.image import base
+from tempest.common import waiters
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
     """"Test image operations about image owner"""
@@ -52,3 +58,65 @@
         self.assertEqual(random_id_2, updated_image_info['owner'])
         self.assertNotEqual(created_image_info['owner'],
                             updated_image_info['owner'])
+
+
+class ImportCopyImagesTest(base.BaseV2ImageAdminTest):
+    """Test the import copy-image operations"""
+
+    @classmethod
+    def skip_checks(cls):
+        super(ImportCopyImagesTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.import_image:
+            skip_msg = (
+                "%s skipped as image import is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+    @decorators.idempotent_id('9b3b644e-03d1-11eb-a036-fa163e2eaf49')
+    def test_image_copy_image_import(self):
+        """Test 'copy-image' import functionalities
+
+        Create image, import image with copy-image method and
+        verify that import succeeded.
+        """
+        available_stores = self.get_available_stores()
+        available_import_methods = self.client.info_import()[
+            'import-methods']['value']
+        # NOTE(gmann): Skip if either the copy-image import method or
+        # multistore is not available.
+        if ('copy-image' not in available_import_methods or
+            not available_stores):
+            raise self.skipException('Either copy-image import method or '
+                                     'multistore is not available')
+        uuid = data_utils.rand_uuid()
+        image_name = data_utils.rand_name('copy-image')
+        container_format = CONF.image.container_formats[0]
+        disk_format = CONF.image.disk_formats[0]
+        image = self.create_image(name=image_name,
+                                  container_format=container_format,
+                                  disk_format=disk_format,
+                                  visibility='private',
+                                  ramdisk_id=uuid)
+        self.assertEqual('queued', image['status'])
+
+        file_content = data_utils.random_bytes()
+        image_file = six.BytesIO(file_content)
+        self.client.store_image_file(image['id'], image_file)
+
+        body = self.client.show_image(image['id'])
+        self.assertEqual(image['id'], body['id'])
+        self.assertEqual(len(file_content), body.get('size'))
+        self.assertEqual('active', body['status'])
+
+        # Copy the image to all stores. For an all_stores request, glance
+        # skips the stores where the image is already available.
+        self.admin_client.image_import(image['id'], method='copy-image',
+                                       all_stores=True,
+                                       all_stores_must_succeed=False)
+
+        # Wait for the copy to finish on all stores.
+        failed_stores = waiters.wait_for_image_copied_to_stores(
+            self.client, image['id'])
+        # Fail if the copy failed on any store.
+        self.assertEqual(0, len(failed_stores),
+                         "Failed to copy the image to the following "
+                         "stores: %s" % str(failed_stores))
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index c32d3c1..eb31d24 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -66,7 +66,7 @@
         cls.create_router_interface(cls.router['id'], cls.subnet['id'])
         # Create two ports one each for Creation and Updating of floatingIP
         cls.ports = []
-        for i in range(2):
+        for _ in range(2):
             port = cls.create_port(cls.network)
             cls.ports.append(port)
 
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index c5334a9..eb2ef7f 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -92,7 +92,7 @@
             # create object in container
             object_name = data_utils.rand_name(name='TestSyncObject')
             data = object_name[::-1].encode()  # Raw data, we need bytes
-            resp, _ = obj_client[0].create_object(cont[0], object_name, data)
+            obj_client[0].create_object(cont[0], object_name, data)
             self.objects.append(object_name)
 
         # wait until container contents list is not empty
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index c611ed6..365dc78 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -32,9 +32,6 @@
 
         cls.xml_end = "</cross-domain-policy>"
 
-    def setUp(self):
-        super(CrossdomainTest, self).setUp()
-
     @decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
     @utils.requires_ext(extension='crossdomain', service='object')
     def test_get_crossdomain_policy(self):
diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
index f5e2443..d4a6a9f2 100644
--- a/tempest/api/object_storage/test_healthcheck.py
+++ b/tempest/api/object_storage/test_healthcheck.py
@@ -21,9 +21,6 @@
 class HealthcheckTest(base.BaseObjectTest):
     """Test healthcheck"""
 
-    def setUp(self):
-        super(HealthcheckTest, self).setUp()
-
     @decorators.idempotent_id('db5723b1-f25c-49a9-bfeb-7b5640caf337')
     def test_get_healthcheck(self):
         """Test getting healthcheck"""
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 4ecbcad..fc9b1a2 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -182,6 +182,7 @@
         self.assertEqual(data, body)
 
     @decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
+    @decorators.skip_because(bug='1905432')
     def test_create_object_with_transfer_encoding(self):
         """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
@@ -770,11 +771,11 @@
         headers = {}
         headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
                                             str(object_name))
-        resp, body = self.object_client.create_object(self.container_name,
-                                                      object_name,
-                                                      data=None,
-                                                      metadata=metadata,
-                                                      headers=headers)
+        resp, _ = self.object_client.create_object(self.container_name,
+                                                   object_name,
+                                                   data=None,
+                                                   metadata=metadata,
+                                                   headers=headers)
         self.assertHeaders(resp, 'Object', 'PUT')
 
         # check the content type
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index 7e553ca..664bbc8 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -165,6 +165,6 @@
 
         self.assertHeaders(resp, 'Object', 'DELETE')
 
-        resp, body = self.container_client.list_container_objects(
+        resp, _ = self.container_client.list_container_objects(
             self.container_name)
         self.assertEqual(int(resp['x-container-object-count']), 0)
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 76c22f0..389d3be 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -127,14 +127,14 @@
     def test_update_volume_with_nonexistent_volume_id(self):
         """Test updating non existent volume should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
-                          volume_id=data_utils.rand_uuid())
+                          volume_id=data_utils.rand_uuid(), name="n")
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('e66e40d6-65e6-4e75-bdc7-636792fa152d')
     def test_update_volume_with_invalid_volume_id(self):
         """Test updating volume with invalid volume id should fail"""
         self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
-                          volume_id=data_utils.rand_name('invalid'))
+                          volume_id=data_utils.rand_name('invalid'), name="n")
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('72aeca85-57a5-4c1f-9057-f320f9ea575b')
diff --git a/tempest/clients.py b/tempest/clients.py
index 8363a8d..6d19a0c 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -44,7 +44,7 @@
         self._set_object_storage_clients()
         self._set_image_clients()
         self._set_network_clients()
-        self.placement_client = self.placement.PlacementClient()
+        self._set_placement_clients()
         # TODO(andreaf) This is maintained for backward compatibility
         # with plugins, but it should removed eventually, since it was
         # never a stable interface and it's not useful anyways
@@ -139,6 +139,11 @@
         self.snapshots_extensions_client = self.compute.SnapshotsClient(
             **params_volume)
 
+    def _set_placement_clients(self):
+        self.placement_client = self.placement.PlacementClient()
+        self.resource_providers_client = \
+            self.placement.ResourceProvidersClient()
+
     def _set_identity_clients(self):
         # Clients below use the admin endpoint type of Keystone API v2
         params_v2_admin = {
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index ff552a1..917262e 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -270,7 +270,7 @@
                 config.CONF.set_config_path(parsed_args.config_file)
             setup_logging()
             resources = []
-            for count in range(parsed_args.concurrency):
+            for _ in range(parsed_args.concurrency):
                 # Use N different cred_providers to obtain different
                 # sets of creds
                 cred_provider = get_credential_provider(parsed_args)
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index d82b6df..8bebce2 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -129,7 +129,6 @@
 
 from cliff import command
 from oslo_serialization import jsonutils as json
-import six
 from stestr import commands
 
 from tempest import clients
@@ -139,10 +138,6 @@
 from tempest.common import credentials_factory as credentials
 from tempest import config
 
-if six.PY2:
-    # Python 2 has not FileNotFoundError exception
-    FileNotFoundError = IOError
-
 CONF = config.CONF
 SAVED_STATE_JSON = "saved_state.json"
 
@@ -167,7 +162,7 @@
         # environment variable and fall back to "python", under python3
         # if it does not exist. we should set it to the python3 executable
         # to deal with this situation better for now.
-        if six.PY3 and 'PYTHON' not in os.environ:
+        if 'PYTHON' not in os.environ:
             os.environ['PYTHON'] = sys.executable
 
     def _create_stestr_conf(self):
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index edb9d16..42f68f1 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -19,7 +19,6 @@
 import struct
 import textwrap
 
-import six
 from six.moves.urllib import parse as urlparse
 
 from oslo_log import log as logging
@@ -31,11 +30,6 @@
 from tempest.lib.common import rest_client
 from tempest.lib.common.utils import data_utils
 
-if six.PY2:
-    ord_func = ord
-else:
-    ord_func = int
-
 CONF = config.CONF
 
 LOG = logging.getLogger(__name__)
@@ -64,7 +58,7 @@
 def create_test_server(clients, validatable=False, validation_resources=None,
                        tenant_network=None, wait_until=None,
                        volume_backed=False, name=None, flavor=None,
-                       image_id=None, **kwargs):
+                       image_id=None, wait_for_sshable=True, **kwargs):
     """Common wrapper utility returning a test server.
 
     This method is a common wrapper returning a test server that can be
@@ -100,6 +94,8 @@
         CONF.compute.flavor_ref will be used instead.
     :param image_id: ID of the image to be used to provision the server. If not
         defined, CONF.compute.image_ref will be used instead.
+    :param wait_for_sshable: Check the server's console log and wait until
+        it is ready to log in.
     :returns: a tuple
     """
 
@@ -270,6 +266,10 @@
                             LOG.exception('Server %s failed to delete in time',
                                           server['id'])
 
+    if (validatable and CONF.compute_feature_enabled.console_output and
+            wait_for_sshable):
+        waiters.wait_for_guest_os_boot(clients.servers_client, server['id'])
+
     return body, servers
 
 
@@ -365,8 +365,8 @@
             # frames less than 125 bytes here (for the negotiation) and
             # that only the 2nd byte contains the length, and since the
             # server doesn't do masking, we can just read the data length
-            if ord_func(header[1]) & 127 > 0:
-                return self._recv(ord_func(header[1]) & 127)
+            if int(header[1]) & 127 > 0:
+                return self._recv(int(header[1]) & 127)
 
     def send_frame(self, data):
         """Wrapper for sending data to add in the WebSocket frame format."""
@@ -383,7 +383,7 @@
             frame_bytes.append(mask[i])
         # Mask each of the actual data bytes that we are going to send
         for i in range(len(data)):
-            frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
+            frame_bytes.append(int(data[i]) ^ mask[i % 4])
         # Convert our integer list to a binary array of bytes
         frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
         self._socket.sendall(frame_bytes)
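The wait_for_sshable flag added to create_test_server above defaults to True, so
validatable servers now wait for a 'login:' prompt in the console log (via the
wait_for_guest_os_boot waiter introduced further down) whenever console output is
available. A minimal sketch of a call site that opts out of that wait, assuming a
test class with an os_primary clients manager and pre-built validation resources:

    from tempest.common import compute

    # Hypothetical call: skip the console-log wait for a server that is
    # never logged into, e.g. one created only to exercise the metadata API.
    server, _ = compute.create_test_server(
        self.os_primary,
        validatable=True,
        validation_resources=self.validation_resources,
        wait_until='ACTIVE',
        wait_for_sshable=False)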
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 167bf5b..914acf7 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -128,3 +128,18 @@
     if extension_name in config_dict[service]:
         return True
     return False
+
+
+def is_network_feature_enabled(feature_name):
+    """A function that will check the list of available network features
+
+    """
+    list_of_features = CONF.network_feature_enabled.available_features
+
+    if not list_of_features:
+        return False
+    if list_of_features[0] == 'all':
+        return True
+    if feature_name in list_of_features:
+        return True
+    return False
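The is_network_feature_enabled helper above mirrors is_extension_enabled, but reads
the new [network-feature-enabled] available_features option added in the config.py
change further down, so it can cover features that the API cannot advertise. A
hedged sketch of how a test might guard on it (the feature name is illustrative):

    from tempest.common import utils

    # 'multicast' is a hypothetical entry in
    # CONF.network_feature_enabled.available_features.
    if not utils.is_network_feature_enabled('multicast'):
        raise self.skipException('multicast is not listed as an '
                                 'available network feature')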
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index cc8778b..625e08e 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -124,12 +124,18 @@
             raise lib_exc.DeleteErrorException(
                 "Server %s failed to delete and is in ERROR status" %
                 server_id)
+
         if server_status == 'SOFT_DELETED':
             # Soft-deleted instances need to be forcibly deleted to
             # prevent some test cases from failing.
             LOG.debug("Automatically force-deleting soft-deleted server %s",
                       server_id)
-            client.force_delete_server(server_id)
+            try:
+                client.force_delete_server(server_id)
+            except lib_exc.NotFound:
+                # The instance may have been deleted so ignore
+                # NotFound exception
+                return
 
         if int(time.time()) - start_time >= client.build_timeout:
             raise lib_exc.TimeoutException
@@ -201,8 +207,39 @@
 
         time.sleep(client.build_interval)
 
-    message = ('Image %(image_id)s failed to import '
-               'on stores: %s' % str(image['os_glance_failed_import']))
+    message = ('Image %s failed to import on stores: %s' %
+               (image_id, str(image['os_glance_failed_import'])))
+    caller = test_utils.find_test_caller()
+    if caller:
+        message = '(%s) %s' % (caller, message)
+    raise lib_exc.TimeoutException(message)
+
+
+def wait_for_image_copied_to_stores(client, image_id):
+    """Waits for an image to be copied on all requested stores.
+
+    The client should also have build_interval and build_timeout attributes.
+    This returns the list of stores where the copy failed.
+    """
+
+    start = int(time.time())
+    store_left = []
+    while int(time.time()) - start < client.build_timeout:
+        image = client.show_image(image_id)
+        store_left = image.get('os_glance_importing_to_stores')
+        # NOTE(danms): If os_glance_importing_to_stores is None, then
+        # we've raced with the startup of the task and should continue
+        # to wait.
+        if store_left is not None and not store_left:
+            return image['os_glance_failed_import']
+        if image['status'].lower() == 'killed':
+            raise exceptions.ImageKilledException(image_id=image_id,
+                                                  status=image['status'])
+
+        time.sleep(client.build_interval)
+
+    message = ('Image %s failed to finish the copy operation '
+               'on stores: %s' % (image_id, str(store_left)))
     caller = test_utils.find_test_caller()
     if caller:
         message = '(%s) %s' % (caller, message)
@@ -400,3 +437,20 @@
                        'the required time (%s s)' % (port_id, server_id,
                                                      client.build_timeout))
             raise lib_exc.TimeoutException(message)
+
+
+def wait_for_guest_os_boot(client, server_id):
+    start_time = int(time.time())
+    while True:
+        console_output = client.get_console_output(server_id)['output']
+        for line in console_output.split('\n'):
+            if 'login:' in line.lower():
+                return
+        if int(time.time()) - start_time >= client.build_timeout:
+            LOG.info("Guest OS on server %s probably isn't ready or its "
+                     "console log can't be parsed properly. If guest OS "
+                     "isn't ready, that may cause problems with SSH to "
+                     "the server.",
+                     server_id)
+            return
+        time.sleep(client.build_interval)
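Two waiters are added here: wait_for_image_copied_to_stores polls
os_glance_importing_to_stores and returns whatever ended up in
os_glance_failed_import, while wait_for_guest_os_boot scans the console log for a
login prompt and only logs, never fails, on timeout. A sketch of the image waiter
as a caller might use it, mirroring the admin image test above (the client
attribute name is an assumption):

    from tempest.common import waiters

    # After triggering a copy-image import, wait for all stores and assert
    # that none of them reported a failure.
    failed = waiters.wait_for_image_copied_to_stores(
        self.images_client, image_id)
    self.assertEqual(0, len(failed),
                     'copy-image failed on stores: %s' % failed)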
diff --git a/tempest/config.py b/tempest/config.py
index 2f2c2e9..0d49b51 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -452,6 +452,10 @@
     cfg.BoolOpt('shelve',
                 default=True,
                 help="Does the test environment support shelving/unshelving?"),
+    cfg.BoolOpt('shelve_migrate',
+                default=False,
+                help="Does the test environment support "
+                     "cold migration of unshelved server?"),
     cfg.BoolOpt('suspend',
                 default=True,
                 help="Does the test environment support suspend/resume?"),
@@ -783,29 +787,37 @@
 NetworkFeaturesGroup = [
     cfg.BoolOpt('ipv6',
                 default=True,
-                help="Allow the execution of IPv6 tests"),
+                help="Allow the execution of IPv6 tests."),
     cfg.ListOpt('api_extensions',
                 default=['all'],
                 help="A list of enabled network extensions with a special "
                      "entry all which indicates every extension is enabled. "
                      "Empty list indicates all extensions are disabled. "
-                     "To get the list of extensions run: 'neutron ext-list'"),
+                     "To get the list of extensions run: "
+                     "'openstack extension list --network'"),
+    cfg.ListOpt('available_features',
+                default=['all'],
+                help="A list of available network features with a special "
+                     "entry all that indicates every feature is available. "
+                     "Empty list indicates all features are disabled. "
+                     "This list can contain features that are not "
+                     "discoverable through the API."),
     cfg.BoolOpt('ipv6_subnet_attributes',
                 default=False,
                 help="Allow the execution of IPv6 subnet tests that use "
                      "the extended IPv6 attributes ipv6_ra_mode "
-                     "and ipv6_address_mode"
+                     "and ipv6_address_mode."
                 ),
     cfg.BoolOpt('port_admin_state_change',
                 default=True,
-                help="Does the test environment support changing"
-                     " port admin state"),
+                help="Does the test environment support changing "
+                     "port admin state?"),
     cfg.BoolOpt('port_security',
                 default=False,
                 help="Does the test environment support port security?"),
     cfg.BoolOpt('floating_ips',
                 default=True,
-                help='Does the test environment support floating_ips'),
+                help='Does the test environment support floating_ips?'),
     cfg.StrOpt('qos_placement_physnet', default=None,
                help='Name of the physnet for placement based minimum '
                     'bandwidth allocation.'),
@@ -858,10 +870,17 @@
     cfg.StrOpt('image_ssh_user',
                default="root",
                help="User name used to authenticate to an instance."),
+    cfg.StrOpt('image_alt_ssh_user',
+               default="root",
+               help="User name used to authenticate to an alt instance."),
     cfg.StrOpt('image_ssh_password',
                default="password",
                help="Password used to authenticate to an instance.",
                secret=True),
+    cfg.StrOpt('image_alt_ssh_password',
+               default="password",
+               help="Password used to authenticate to an alt instance.",
+               secret=True),
     cfg.StrOpt('ssh_shell_prologue',
                default="set -eu -o pipefail; PATH=$$PATH:/sbin:/usr/sbin;",
                help="Shell fragments to use before executing a command "
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
index 5cf0f8a..f4c01ee 100644
--- a/tempest/lib/api_schema/response/compute/v2_71/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -79,3 +79,6 @@
 check_tag_existence = copy.deepcopy(servers270.check_tag_existence)
 update_tag = copy.deepcopy(servers270.update_tag)
 delete_tag = copy.deepcopy(servers270.delete_tag)
+attach_volume = copy.deepcopy(servers270.attach_volume)
+show_volume_attachment = copy.deepcopy(servers270.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers270.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
index 6e491e9..ae7ebc4 100644
--- a/tempest/lib/api_schema/response/compute/v2_73/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -76,3 +76,6 @@
 check_tag_existence = copy.deepcopy(servers271.check_tag_existence)
 update_tag = copy.deepcopy(servers271.update_tag)
 delete_tag = copy.deepcopy(servers271.delete_tag)
+attach_volume = copy.deepcopy(servers271.attach_volume)
+show_volume_attachment = copy.deepcopy(servers271.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers271.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/volume/groups.py b/tempest/lib/api_schema/response/volume/groups.py
index cb31269..f6e4bc2 100644
--- a/tempest/lib/api_schema/response/volume/groups.py
+++ b/tempest/lib/api_schema/response/volume/groups.py
@@ -64,7 +64,10 @@
                         'type': 'array',
                         'items': {'type': 'string', 'format': 'uuid'}
                     },
-                    'replication_status': {'type': 'string'}
+                    # TODO(zhufl): replication_status is added in 3.38, we
+                    # should move it to the 3.38 schema file when microversion
+                    # is supported in volume interfaces
+                    'replication_status': {'type': ['string', 'null']}
                 },
                 'additionalProperties': False,
                 'required': ['status', 'description', 'created_at',
@@ -129,6 +132,10 @@
                             'type': 'array',
                             'items': {'type': 'string', 'format': 'uuid'}
                         },
+                        # TODO(zhufl): replication_status is added in 3.38, we
+                        # should move it to the 3.38 schema file when
+                        # microversion is supported in volume interfaces
+                        'replication_status': {'type': ['string', 'null']}
                     },
                     'additionalProperties': False,
                     'required': ['status', 'description', 'created_at',
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 3fee489..7c279ab 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -391,7 +391,7 @@
         """
         if auth_data is None:
             auth_data = self.get_auth()
-        token, _auth_data = auth_data
+        _, _auth_data = auth_data
         service = filters.get('service')
         region = filters.get('region')
         name = filters.get('name')
@@ -524,7 +524,7 @@
         """
         if auth_data is None:
             auth_data = self.get_auth()
-        token, _auth_data = auth_data
+        _, _auth_data = auth_data
         service = filters.get('service')
         region = filters.get('region')
         name = filters.get('name')
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index d8c776b..c661d21 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -18,7 +18,6 @@
 import subprocess
 
 from oslo_log import log as logging
-import six
 
 from tempest.lib import base
 import tempest.lib.cli.output_parser
@@ -55,8 +54,6 @@
                     flags, action, params])
     cmd = cmd.strip()
     LOG.info("running: '%s'", cmd)
-    if six.PY2:
-        cmd = cmd.encode('utf-8')
     cmd = shlex.split(cmd)
     stdout = subprocess.PIPE
     stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
@@ -67,10 +64,7 @@
                                        cmd,
                                        result,
                                        result_err)
-    if six.PY2:
-        return result
-    else:
-        return os.fsdecode(result)
+    return os.fsdecode(result)
 
 
 class CLIClient(object):
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index b34066f..ff09671 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -225,8 +225,7 @@
 
     def _add_import_for_test_uuid(self, patcher, src_parsed, source_path):
         import_list = [node for node in src_parsed.body
-                       if isinstance(node, ast.Import) or
-                       isinstance(node, ast.ImportFrom)]
+                       if isinstance(node, (ast.Import, ast.ImportFrom))]
 
         if not import_list:
             print("(WARNING) %s: The file is not valid as it does not contain "
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 0513e90..b47b511 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -507,7 +507,7 @@
             if not hasattr(body, "keys") or len(body.keys()) != 1:
                 return body
             # Just return the "wrapped" element
-            first_key, first_item = six.next(six.iteritems(body))
+            _, first_item = six.next(six.iteritems(body))
             if isinstance(first_item, (dict, list)):
                 return first_item
         except (ValueError, IndexError):
diff --git a/tempest/lib/common/thread.py b/tempest/lib/common/thread.py
index b47d40d..ef0ec73 100644
--- a/tempest/lib/common/thread.py
+++ b/tempest/lib/common/thread.py
@@ -13,13 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import six
-
-if six.PY2:
-    # module thread is removed in Python 3
-    from thread import get_ident  # noqa: H237,F401
-
-else:
-    # On Python3 thread module has been deprecated and get_ident has been moved
-    # to threading module
-    from threading import get_ident  # noqa: F401
+# On Python3 thread module has been deprecated and get_ident has been moved
+# to threading module
+from threading import get_ident  # noqa: F401
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index 7f94612..44b55eb 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -129,7 +129,7 @@
     :rtype: string
     """
     guid = []
-    for i in range(8):
+    for _ in range(8):
         guid.append("%02x" % random.randint(0x00, 0xff))
     return ':'.join(guid)
 
diff --git a/tempest/lib/common/utils/test_utils.py b/tempest/lib/common/utils/test_utils.py
index 2a9f3a9..4cf8351 100644
--- a/tempest/lib/common/utils/test_utils.py
+++ b/tempest/lib/common/utils/test_utils.py
@@ -80,10 +80,19 @@
 
 def call_and_ignore_notfound_exc(func, *args, **kwargs):
     """Call the given function and pass if a `NotFound` exception is raised."""
-    try:
-        return func(*args, **kwargs)
-    except exceptions.NotFound:
-        pass
+    attempt = 0
+    while True:
+        attempt += 1
+        try:
+            return func(*args, **kwargs)
+        except exceptions.NotFound:
+            return
+        except exceptions.ServerFault:
+            # NOTE(danms): Tolerate three ServerFault exceptions while trying
+            # to do this thing, and after that, assume it's legit.
+            if attempt >= 3:
+                raise
+            LOG.warning('Got ServerFault while running %s, retrying...', func)
 
 
 def call_until_true(func, duration, sleep_for, *args, **kwargs):
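With the change above, call_and_ignore_notfound_exc still swallows NotFound, but
now also retries up to three times on ServerFault before letting it propagate.
Typical cleanup registrations are unaffected; a minimal sketch (the volume client
and id are assumptions):

    from tempest.lib.common.utils import test_utils

    # NotFound during teardown is ignored; transient ServerFault responses
    # are retried up to three times before being re-raised.
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.volumes_client.delete_volume, volume['id'])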
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 6723516..e82b58f 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -646,7 +646,7 @@
 
         For a full list of available parameters, please refer to the official
         API reference:
-        https://docs.openstack.org/api-ref/compute/#create-remote-console
+        https://docs.openstack.org/api-ref/compute/#create-console
         """
         param = {
             'remote_console': {
diff --git a/tempest/lib/services/identity/v3/groups_client.py b/tempest/lib/services/identity/v3/groups_client.py
index f823b21..2cfb24a 100644
--- a/tempest/lib/services/identity/v3/groups_client.py
+++ b/tempest/lib/services/identity/v3/groups_client.py
@@ -110,6 +110,6 @@
 
     def check_group_user_existence(self, group_id, user_id):
         """Check user in group."""
-        resp, body = self.head('groups/%s/users/%s' % (group_id, user_id))
+        resp, _ = self.head('groups/%s/users/%s' % (group_id, user_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/identity/v3/inherited_roles_client.py b/tempest/lib/services/identity/v3/inherited_roles_client.py
index 3949437..f937ed6 100644
--- a/tempest/lib/services/identity/v3/inherited_roles_client.py
+++ b/tempest/lib/services/identity/v3/inherited_roles_client.py
@@ -51,7 +51,7 @@
     def check_user_inherited_project_role_on_domain(
             self, domain_id, user_id, role_id):
         """Checks whether a user has an inherited project role on a domain."""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/domains/%s/users/%s/roles/%s/inherited_to_projects"
             % (domain_id, user_id, role_id))
         self.expected_success(204, resp.status)
@@ -88,7 +88,7 @@
     def check_group_inherited_project_role_on_domain(
             self, domain_id, group_id, role_id):
         """Checks whether a group has an inherited project role on a domain."""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/domains/%s/groups/%s/roles/%s/inherited_to_projects"
             % (domain_id, group_id, role_id))
         self.expected_success(204, resp.status)
@@ -115,7 +115,7 @@
     def check_user_has_flag_on_inherited_to_project(
             self, project_id, user_id, role_id):
         """Check if user has an inherited project role on project"""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/projects/%s/users/%s/roles/%s/inherited_to_projects"
             % (project_id, user_id, role_id))
         self.expected_success(204, resp.status)
@@ -142,7 +142,7 @@
     def check_group_has_flag_on_inherited_to_project(
             self, project_id, group_id, role_id):
         """Check if group has an inherited project role on project"""
-        resp, body = self.head(
+        resp, _ = self.head(
             "OS-INHERIT/projects/%s/groups/%s/roles/%s/inherited_to_projects"
             % (project_id, group_id, role_id))
         self.expected_success(204, resp.status)
diff --git a/tempest/lib/services/identity/v3/oauth_token_client.py b/tempest/lib/services/identity/v3/oauth_token_client.py
index 6ca401b..722deca 100644
--- a/tempest/lib/services/identity/v3/oauth_token_client.py
+++ b/tempest/lib/services/identity/v3/oauth_token_client.py
@@ -71,7 +71,7 @@
         normalized_params = '&'.join(parameter_parts)
 
         # normalize_uri
-        scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
+        scheme, netloc, path, params, _, _ = urlparse.urlparse(uri)
         scheme = scheme.lower()
         netloc = netloc.lower()
         path = path.replace('//', '/')
diff --git a/tempest/lib/services/identity/v3/roles_client.py b/tempest/lib/services/identity/v3/roles_client.py
index f9356be..0d7593a 100644
--- a/tempest/lib/services/identity/v3/roles_client.py
+++ b/tempest/lib/services/identity/v3/roles_client.py
@@ -122,16 +122,16 @@
     def check_user_role_existence_on_project(self, project_id,
                                              user_id, role_id):
         """Check role of a user on a project."""
-        resp, body = self.head('projects/%s/users/%s/roles/%s' %
-                               (project_id, user_id, role_id))
+        resp, _ = self.head('projects/%s/users/%s/roles/%s' %
+                            (project_id, user_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def check_user_role_existence_on_domain(self, domain_id,
                                             user_id, role_id):
         """Check role of a user on a domain."""
-        resp, body = self.head('domains/%s/users/%s/roles/%s' %
-                               (domain_id, user_id, role_id))
+        resp, _ = self.head('domains/%s/users/%s/roles/%s' %
+                            (domain_id, user_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
@@ -182,16 +182,16 @@
     def check_role_from_group_on_project_existence(self, project_id,
                                                    group_id, role_id):
         """Check role of a group on a project."""
-        resp, body = self.head('projects/%s/groups/%s/roles/%s' %
-                               (project_id, group_id, role_id))
+        resp, _ = self.head('projects/%s/groups/%s/roles/%s' %
+                            (project_id, group_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def check_role_from_group_on_domain_existence(self, domain_id,
                                                   group_id, role_id):
         """Check role of a group on a domain."""
-        resp, body = self.head('domains/%s/groups/%s/roles/%s' %
-                               (domain_id, group_id, role_id))
+        resp, _ = self.head('domains/%s/groups/%s/roles/%s' %
+                            (domain_id, group_id, role_id))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
@@ -232,14 +232,14 @@
 
     def check_role_inference_rule(self, prior_role, implies_role):
         """Check a role inference rule."""
-        resp, body = self.head('roles/%s/implies/%s' %
-                               (prior_role, implies_role))
+        resp, _ = self.head('roles/%s/implies/%s' %
+                            (prior_role, implies_role))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
 
     def delete_role_inference_rule(self, prior_role, implies_role):
         """Delete a role inference rule."""
-        resp, body = self.delete('roles/%s/implies/%s' %
-                                 (prior_role, implies_role))
+        resp, _ = self.delete('roles/%s/implies/%s' %
+                              (prior_role, implies_role))
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/placement/__init__.py b/tempest/lib/services/placement/__init__.py
index 5c20c57..daeaeab 100644
--- a/tempest/lib/services/placement/__init__.py
+++ b/tempest/lib/services/placement/__init__.py
@@ -14,5 +14,7 @@
 
 from tempest.lib.services.placement.placement_client import \
     PlacementClient
+from tempest.lib.services.placement.resource_providers_client import \
+    ResourceProvidersClient
 
-__all__ = ['PlacementClient']
+__all__ = ['PlacementClient', 'ResourceProvidersClient']
diff --git a/tempest/lib/services/placement/resource_providers_client.py b/tempest/lib/services/placement/resource_providers_client.py
new file mode 100644
index 0000000..56f6409
--- /dev/null
+++ b/tempest/lib/services/placement/resource_providers_client.py
@@ -0,0 +1,82 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.placement import base_placement_client
+
+
+class ResourceProvidersClient(base_placement_client.BasePlacementClient):
+    """Client class for resource provider related methods
+
+    This client class aims to support read-only API operations for resource
+    providers. The following resources are supported:
+    * resource providers
+    * resource provider inventories
+    * resource provider aggregates
+    """
+
+    def list_resource_providers(self, **params):
+        """List resource providers.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-providers
+        """
+        url = '/resource_providers'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_resource_provider(self, rp_uuid):
+        """Show resource provider.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#show-resource-provider
+        """
+        url = '/resource_providers/%s' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_inventories(self, rp_uuid):
+        """List resource provider inventories.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-provider-inventories
+        """
+        url = '/resource_providers/%s/inventories' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_aggregates(self, rp_uuid):
+        """List resource provider aggregates.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/placement/#list-resource-provider-aggregates
+        """
+        url = '/resource_providers/%s/aggregates' % rp_uuid
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
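The new ResourceProvidersClient is also wired into tempest.clients.Manager as
resource_providers_client via _set_placement_clients earlier in this change. A
hedged sketch of read-only usage from a test (the os_admin attribute and the
'name' filter value are assumptions):

    # List resource providers filtered by name, then fetch their inventories.
    client = self.os_admin.resource_providers_client
    rps = client.list_resource_providers(
        name='compute-0')['resource_providers']
    for rp in rps:
        inventories = client.list_resource_provider_inventories(rp['uuid'])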
diff --git a/tempest/lib/services/volume/v1/volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
index 4ed5eb1..2efb0da 100644
--- a/tempest/lib/services/volume/v1/volumes_client.py
+++ b/tempest/lib/services/volume/v1/volumes_client.py
@@ -302,5 +302,5 @@
     def retype_volume(self, volume_id, **kwargs):
         """Updates volume with new volume type."""
         post_body = json.dumps({'os-retype': kwargs})
-        resp, body = self.post('volumes/%s/action' % volume_id, post_body)
+        resp, _ = self.post('volumes/%s/action' % volume_id, post_body)
         self.expected_success(202, resp.status)
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 7fa24a4..1ebd447 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -65,6 +65,19 @@
         self.validate_response(schema.show_volume_type, resp, body)
         return rest_client.ResponseBody(resp, body)
 
+    def show_default_volume_type(self):
+        """Returns the details of a single volume type.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/block-storage/v3/index.html#show-default-volume-type
+        """
+        url = "types/default"
+        resp, body = self.get(url)
+        body = json.loads(body)
+        self.validate_response(schema.show_volume_type, resp, body)
+        return rest_client.ResponseBody(resp, body)
+
     def create_volume_type(self, **kwargs):
         """Create volume type.
 
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 13ad4c1..4e8cc54 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -161,7 +161,7 @@
                         client.delete_port, port['id'])
         return port
 
-    def create_keypair(self, client=None):
+    def create_keypair(self, client=None, **kwargs):
         """Creates keypair
 
         Keypair is a public key of OpenSSH key pair used for accessing
@@ -171,10 +171,11 @@
         """
         if not client:
             client = self.keypairs_client
-        name = data_utils.rand_name(self.__class__.__name__)
+        if not kwargs.get('name'):
+            kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
         # We don't need to create a keypair by pubkey in scenario
-        body = client.create_keypair(name=name)
-        self.addCleanup(client.delete_keypair, name)
+        body = client.create_keypair(**kwargs)
+        self.addCleanup(client.delete_keypair, kwargs['name'])
         return body['keypair']
 
     def create_server(self, name=None, image_id=None, flavor=None,
@@ -307,7 +308,7 @@
         return server
 
     def create_volume(self, size=None, name=None, snapshot_id=None,
-                      imageRef=None, volume_type=None):
+                      imageRef=None, volume_type=None, **kwargs):
         """Creates volume
 
         This wrapper utility creates volume and waits for volume to be
@@ -327,11 +328,11 @@
             size = max(size, min_disk)
         if name is None:
             name = data_utils.rand_name(self.__class__.__name__ + "-volume")
-        kwargs = {'display_name': name,
-                  'snapshot_id': snapshot_id,
-                  'imageRef': imageRef,
-                  'volume_type': volume_type,
-                  'size': size}
+        kwargs.update({'name': name,
+                       'snapshot_id': snapshot_id,
+                       'imageRef': imageRef,
+                       'volume_type': volume_type,
+                       'size': size})
 
         if CONF.compute.compute_volume_common_az:
             kwargs.setdefault('availability_zone',
@@ -423,7 +424,7 @@
         snapshot = self.snapshots_client.create_snapshot(
             volume_id=volume_id,
             force=force,
-            display_name=name,
+            name=name,
             description=description,
             metadata=metadata)['snapshot']
 
@@ -455,7 +456,8 @@
             admin_volumes_client.wait_for_resource_deletion(volume['id'])
         admin_volume_type_client.delete_volume_type(volume_type['id'])
 
-    def create_volume_type(self, client=None, name=None, backend_name=None):
+    def create_volume_type(self, client=None, name=None, backend_name=None,
+                           **kwargs):
         """Creates volume type
 
         In a multiple-storage back-end configuration,
@@ -482,12 +484,14 @@
 
         LOG.debug("Creating a volume type: %s on backend %s",
                   randomized_name, backend_name)
-        extra_specs = {}
+        extra_specs = kwargs.pop("extra_specs", {})
         if backend_name:
-            extra_specs = {"volume_backend_name": backend_name}
+            extra_specs.update({"volume_backend_name": backend_name})
 
-        volume_type = client.create_volume_type(
-            name=randomized_name, extra_specs=extra_specs)['volume_type']
+        volume_type_resp = client.create_volume_type(
+            name=randomized_name, extra_specs=extra_specs, **kwargs)
+        volume_type = volume_type_resp['volume_type']
+
         self.assertIn('id', volume_type)
         self.addCleanup(self._cleanup_volume_type, volume_type)
         return volume_type
@@ -574,7 +578,7 @@
         linux_client.validate_authentication()
         return linux_client
 
-    def image_create(self, name='scenario-img'):
+    def image_create(self, name='scenario-img', **kwargs):
         img_path = CONF.scenario.img_file
         if not os.path.exists(img_path):
             # TODO(kopecmartin): replace LOG.warning for rasing
@@ -614,6 +618,7 @@
             # Additional properties are flattened out in the v2 API.
             if img_properties:
                 params.update(img_properties)
+        params.update(kwargs)
         body = self.image_client.create_image(**params)
         image = body['image'] if 'image' in body else body
         self.addCleanup(self.image_client.delete_image, image['id'])
@@ -626,7 +631,7 @@
         LOG.debug("image:%s", image['id'])
         return image['id']
 
-    def _log_console_output(self, servers=None, client=None):
+    def _log_console_output(self, servers=None, client=None, **kwargs):
         """Console log output"""
         if not CONF.compute_feature_enabled.console_output:
             LOG.debug('Console output not supported, cannot log')
@@ -638,7 +643,7 @@
         for server in servers:
             try:
                 console_output = client.get_console_output(
-                    server['id'])['output']
+                    server['id'], **kwargs)['output']
                 LOG.debug('Console output for %s\nbody=\n%s',
                           server['id'], console_output)
             except lib_exc.NotFound:
@@ -698,17 +703,20 @@
                   image_name, server['name'])
         return snapshot_image
 
-    def nova_volume_attach(self, server, volume_to_attach):
+    def nova_volume_attach(self, server, volume_to_attach, **kwargs):
         """Compute volume attach
 
         This utility attaches volume from compute and waits for the
         volume status to be 'in-use' state.
         """
         volume = self.servers_client.attach_volume(
-            server['id'], volumeId=volume_to_attach['id'])['volumeAttachment']
+            server['id'], volumeId=volume_to_attach['id'],
+            **kwargs)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'in-use')
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.nova_volume_detach, server, volume)
         # Return the updated volume after the attachment
         return self.volumes_client.show_volume(volume['id'])['volume']
 
@@ -811,13 +819,15 @@
                 LOG.exception(extra_msg)
                 raise
 
-    def create_floating_ip(self, server, pool_name=None):
+    def create_floating_ip(self, server, pool_name=None, **kwargs):
         """Create a floating IP and associates to a server on Nova"""
 
         if not pool_name:
             pool_name = CONF.network.floating_network_name
+
         floating_ip = (self.compute_floating_ips_client.
-                       create_floating_ip(pool=pool_name)['floating_ip'])
+                       create_floating_ip(pool=pool_name,
+                                          **kwargs)['floating_ip'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.compute_floating_ips_client.delete_floating_ip,
                         floating_ip['id'])
@@ -826,18 +836,20 @@
         return floating_ip
 
     def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
-                         private_key=None, server=None):
+                         private_key=None, server=None, username=None,
+                         fs='ext4'):
         """Creates timestamp
 
         This wrapper utility does ssh, creates timestamp and returns the
         created timestamp.
         """
-
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key,
-                                            server=server)
+                                            server=server,
+                                            username=username)
+
         if dev_name is not None:
-            ssh_client.make_fs(dev_name)
+            ssh_client.make_fs(dev_name, fs=fs)
             ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
                                                                mount_path))
         cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
@@ -866,18 +878,22 @@
             ssh_client.exec_command('sudo umount %s' % mount_path)
         return timestamp
 
-    def get_server_ip(self, server):
+    def get_server_ip(self, server, **kwargs):
         """Get the server fixed or floating IP.
 
         Based on the configuration we're in, return a correct ip
         address for validating that a guest is up.
+
+        If CONF.validation.connect_method is floating, a floating IP
+        will be created for the server, with kwargs passed through as
+        additional arguments.
         """
 
         if CONF.validation.connect_method == 'floating':
             # The tests calling this method don't have a floating IP
             # and can't make use of the validation resources. So the
             # method is creating the floating IP there.
-            return self.create_floating_ip(server)['ip']
+            return self.create_floating_ip(server, **kwargs)['ip']
         elif CONF.validation.connect_method == 'fixed':
             # Determine the network name to look for based on config or creds
             # provider network resources.
@@ -917,14 +933,14 @@
                                     keypair=None,
                                     security_group=None,
                                     delete_on_termination=False,
-                                    name=None):
+                                    name=None, **kwargs):
         """Boot instance from resource
 
         This wrapper utility boots instance from resource with block device
         mapping with source info passed in arguments
         """
 
-        create_kwargs = dict()
+        create_kwargs = {'image_id': ''}
         if keypair:
             create_kwargs['key_name'] = keypair['name']
         if security_group:
@@ -936,8 +952,9 @@
             delete_on_termination=delete_on_termination))
         if name:
             create_kwargs['name'] = name
+        create_kwargs.update(kwargs)
 
-        return self.create_server(image_id='', **create_kwargs)
+        return self.create_server(**create_kwargs)
 
     def create_volume_from_image(self):
         """Create volume from image"""
@@ -1064,14 +1081,13 @@
 
         return subnet
 
-    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
-        if ip_addr:
-            ports = self.os_admin.ports_client.list_ports(
-                device_id=server['id'],
-                fixed_ips='ip_address=%s' % ip_addr)['ports']
-        else:
-            ports = self.os_admin.ports_client.list_ports(
-                device_id=server['id'])['ports']
+    def _get_server_port_id_and_ip4(self, server, ip_addr=None, **kwargs):
+
+        if ip_addr and not kwargs.get('fixed_ips'):
+            kwargs['fixed_ips'] = 'ip_address=%s' % ip_addr
+        ports = self.os_admin.ports_client.list_ports(
+            device_id=server['id'], **kwargs)['ports']
+
         # A port can have more than one IP address in some cases.
         # If the network is dual-stack (IPv4 + IPv6), this port is associated
         # with 2 subnets
@@ -1110,7 +1126,7 @@
         return net[0]
 
     def create_floating_ip(self, server, external_network_id=None,
-                           port_id=None, client=None):
+                           port_id=None, client=None, **kwargs):
         """Create a floating IP and associates to a resource/port on Neutron"""
 
         if not external_network_id:
@@ -1122,15 +1138,17 @@
         else:
             ip4 = None
 
-        kwargs = {
+        floatingip_kwargs = {
             'floating_network_id': external_network_id,
             'port_id': port_id,
             'tenant_id': server.get('project_id') or server['tenant_id'],
             'fixed_ip_address': ip4,
         }
         if CONF.network.subnet_id:
-            kwargs['subnet_id'] = CONF.network.subnet_id
-        result = client.create_floatingip(**kwargs)
+            floatingip_kwargs['subnet_id'] = CONF.network.subnet_id
+
+        floatingip_kwargs.update(kwargs)
+        result = client.create_floatingip(**floatingip_kwargs)
         floating_ip = result['floatingip']
 
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
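The rename to floatingip_kwargs keeps the caller's **kwargs separate from the request body until the final update(), following the same build-then-override pattern as above. A hedged sketch of that pattern; the dns_name/dns_domain values are borrowed from the fake floating IP data later in this change and are only an example of what a caller might pass:

    def build_floatingip_body(floating_network_id, port_id, tenant_id,
                              fixed_ip=None, subnet_id=None, **kwargs):
        body = {
            'floating_network_id': floating_network_id,
            'port_id': port_id,
            'tenant_id': tenant_id,
            'fixed_ip_address': fixed_ip,
        }
        if subnet_id:
            body['subnet_id'] = subnet_id
        body.update(kwargs)        # e.g. dns_name / dns_domain from a test
        return body

    print(build_floatingip_body('ext-net', 'port-1', 'proj-1',
                                dns_name='myfip', dns_domain='my-domain.org.'))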
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index b515639..58e234f 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -51,10 +51,27 @@
         return aggregate
 
     def _get_host_name(self):
+        # Find a host that has not been added to another availability zone,
+        # since a host cannot belong to more than one availability zone.
         svc_list = self.services_client.list_services(
             binary='nova-compute')['services']
         self.assertNotEmpty(svc_list)
-        return svc_list[0]['host']
+        hosts_available = []
+        for host in svc_list:
+            if (host['state'] == 'up' and host['status'] == 'enabled'):
+                hosts_available.append(host['host'])
+        aggregates = self.aggregates_client.list_aggregates()['aggregates']
+        hosts_in_zone = []
+        for agg in aggregates:
+            if agg['availability_zone']:
+                hosts_in_zone.extend(agg['hosts'])
+        hosts = [v for v in hosts_available if v not in hosts_in_zone]
+        if not hosts:
+            raise self.skipException(
+                "All hosts are already in other availability zones, so "
+                "no host can be added to the aggregate.\n"
+                "Aggregates list: %s" % aggregates)
+        return hosts[0]
 
     def _add_host(self, aggregate_id, host):
         aggregate = (self.aggregates_client.add_host(aggregate_id, host=host)
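The new selection logic reads as a small filter: keep compute services that are up and enabled, then drop any host already placed in an availability-zone aggregate. A standalone sketch with made-up sample data:

    def pick_free_host(services, aggregates):
        available = [s['host'] for s in services
                     if s['state'] == 'up' and s['status'] == 'enabled']
        hosts_in_zone = []
        for agg in aggregates:
            if agg['availability_zone']:
                hosts_in_zone.extend(agg['hosts'])
        free = [h for h in available if h not in hosts_in_zone]
        return free[0] if free else None

    services = [{'host': 'cmp1', 'state': 'up', 'status': 'enabled'},
                {'host': 'cmp2', 'state': 'up', 'status': 'enabled'}]
    aggregates = [{'availability_zone': 'az1', 'hosts': ['cmp1']}]
    print(pick_free_host(services, aggregates))   # -> cmp2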
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
index 5eab1da..74d4ed9 100644
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -178,7 +178,13 @@
         for rp, resources in allocations['allocations'].items():
             if self.INGRESS_RESOURCE_CLASS in resources['resources']:
                 bw_resource_in_alloc = True
+                allocation_rp = rp
         self.assertTrue(bw_resource_in_alloc)
+        # Check that the port's binding:profile is not empty and that its
+        # allocation entry equals the resource provider uuid
+        port = self.os_admin.ports_client.show_port(valid_port['id'])
+        self.assertEqual(allocation_rp,
+                         port['port']['binding:profile']['allocation'])
 
         # boot another vm with max int bandwidth
         not_valid_port = self.create_port(
@@ -196,3 +202,6 @@
         server2 = self.servers_client.show_server(server2['id'])
         self.assertIn('fault', server2['server'])
         self.assertIn('No valid host', server2['server']['fault']['message'])
+        # Check that the port's binding:profile is empty
+        port = self.os_admin.ports_client.show_port(not_valid_port['id'])
+        self.assertEqual(0, len(port['port']['binding:profile']))
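The two new assertions encode the expectation that Neutron writes the allocated resource provider into the port's binding:profile when the minimum-bandwidth request is satisfied, and leaves the profile empty when scheduling fails. A tiny illustration of the expected shapes (the UUID is a placeholder):

    bound_port = {'binding:profile': {'allocation': 'rp-uuid-1234'}}
    unbound_port = {'binding:profile': {}}
    assert bound_port['binding:profile']['allocation'] == 'rp-uuid-1234'
    assert len(unbound_port['binding:profile']) == 0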
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index e26dc9d..dbab212 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -80,8 +80,8 @@
         return floating_ip
 
     def _check_network_connectivity(self, server, keypair, floating_ip,
-                                    should_connect=True):
-        username = CONF.validation.image_ssh_user
+                                    should_connect=True,
+                                    username=CONF.validation.image_ssh_user):
         private_key = keypair['private_key']
         self.check_tenant_network_connectivity(
             server, username, private_key,
@@ -95,12 +95,13 @@
                                    'Public network connectivity check failed',
                                    server)
 
-    def _wait_server_status_and_check_network_connectivity(self, server,
-                                                           keypair,
-                                                           floating_ip):
+    def _wait_server_status_and_check_network_connectivity(
+        self, server, keypair, floating_ip,
+        username=CONF.validation.image_ssh_user):
         waiters.wait_for_server_status(self.servers_client, server['id'],
                                        'ACTIVE')
-        self._check_network_connectivity(server, keypair, floating_ip)
+        self._check_network_connectivity(server, keypair, floating_ip,
+                                         username=username)
 
     @decorators.idempotent_id('61f1aa9a-1573-410e-9054-afa557cab021')
     @decorators.attr(type='slow')
@@ -137,10 +138,11 @@
         server = self._setup_server(keypair)
         floating_ip = self._setup_network(server, keypair)
         image_ref_alt = CONF.compute.image_ref_alt
+        username_alt = CONF.validation.image_alt_ssh_user
         self.servers_client.rebuild_server(server['id'],
                                            image_ref=image_ref_alt)
         self._wait_server_status_and_check_network_connectivity(
-            server, keypair, floating_ip)
+            server, keypair, floating_ip, username_alt)
 
     @decorators.idempotent_id('2b2642db-6568-4b35-b812-eceed3fa20ce')
     @testtools.skipUnless(CONF.compute_feature_enabled.pause,
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index d6b6d14..ed06898 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -33,9 +33,18 @@
      * shelve the instance
      * unshelve the instance
      * check the existence of the timestamp file in the unshelved instance
+     * check the existence of the timestamp file in the unshelved instance,
+       after a cold migration
 
     """
 
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(TestShelveInstance, cls).setup_clients()
+        cls.admin_servers_client = cls.os_admin.servers_client
+
     @classmethod
     def skip_checks(cls):
         super(TestShelveInstance, cls).skip_checks()
@@ -50,7 +59,21 @@
         waiters.wait_for_server_status(self.servers_client, server['id'],
                                        'ACTIVE')
 
-    def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False):
+    def _cold_migrate_server(self, server):
+        src_host = self.get_host_for_server(server['id'])
+
+        self.admin_servers_client.migrate_server(server['id'])
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'VERIFY_RESIZE')
+        self.servers_client.confirm_resize_server(server['id'])
+        waiters.wait_for_server_status(self.servers_client,
+                                       server['id'], 'ACTIVE')
+
+        dst_host = self.get_host_for_server(server['id'])
+        self.assertNotEqual(src_host, dst_host)
+
+    def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False,
+                                                cold_migrate=False):
         keypair = self.create_keypair()
 
         security_group = self._create_security_group()
@@ -71,6 +94,10 @@
         # with the instance snapshot
         self._shelve_then_unshelve_server(server)
 
+        if cold_migrate:
+            # Prevent bug #1732428 from coming back
+            self._cold_migrate_server(server)
+
         timestamp2 = self.get_timestamp(instance_ip,
                                         private_key=keypair['private_key'],
                                         server=server)
@@ -91,3 +118,18 @@
     @utils.services('compute', 'volume', 'network', 'image')
     def test_shelve_volume_backed_instance(self):
         self._create_server_then_shelve_and_unshelve(boot_from_volume=True)
+
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('1295fd9e-193a-4cf8-b211-55358e021bae')
+    @testtools.skipUnless(CONF.network.public_network_id,
+                          'The public_network_id option must be specified.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+                          'Cold migration not available.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.shelve_migrate,
+                          'Shelve migrate not available.')
+    @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+                          'Less than 2 compute nodes, skipping multinode '
+                          'tests.')
+    @utils.services('compute', 'network', 'image')
+    def test_cold_migrate_unshelved_instance(self):
+        self._create_server_then_shelve_and_unshelve(cold_migrate=True)
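As a reading aid, the cold-migration sequence added above can be summarised as one function with the clients and waiters injected; this is a sketch of the flow, not the Tempest API itself:

    def cold_migrate(server_id, admin_client, client, wait_for_status,
                     host_of):
        src = host_of(server_id)                       # remember source host
        admin_client.migrate_server(server_id)         # admin-only API
        wait_for_status(client, server_id, 'VERIFY_RESIZE')
        client.confirm_resize_server(server_id)
        wait_for_status(client, server_id, 'ACTIVE')
        assert host_of(server_id) != src               # the server moved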
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 5d9ddfa..3c99bbe 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -29,10 +29,6 @@
 from tempest.lib.common.utils import data_utils
 from tempest.tests import base
 
-if six.PY2:
-    # Python 2 has not FileNotFoundError exception
-    FileNotFoundError = IOError
-
 DEVNULL = open(os.devnull, 'wb')
 atexit.register(DEVNULL.close)
 
@@ -149,8 +145,7 @@
         ]
         # NOTE(mtreinish): on python 3 the subprocess prints b'' around
         # stdout.
-        if six.PY3:
-            result = ["b\'" + x + "\'" for x in result]
+        result = ["b\'" + x + "\'" for x in result]
         self.assertEqual(result, tests)
 
     def test_tempest_run_with_worker_file(self):
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 73924bd..f45eec0 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -55,6 +55,56 @@
                           waiters.wait_for_image_status,
                           self.client, 'fake_image_id', 'active')
 
+    def test_wait_for_image_imported_to_stores(self):
+        self.client.show_image.return_value = ({'status': 'active',
+                                                'stores': 'fake_store'})
+        start_time = int(time.time())
+        waiters.wait_for_image_imported_to_stores(
+            self.client, 'fake_image_id', 'fake_store')
+        end_time = int(time.time())
+        # Ensure waiter returns before build_timeout
+        self.assertLess((end_time - start_time), 10)
+
+    def test_wait_for_image_imported_to_stores_timeout(self):
+        time_mock = self.patch('time.time')
+        client = mock.MagicMock()
+        client.build_timeout = 2
+        self.patch('time.time', side_effect=[0., 1., 2.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        client.show_image.return_value = ({
+            'status': 'saving',
+            'stores': 'fake_store',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_image_imported_to_stores,
+                          client, 'fake_image_id', 'fake_store')
+
+    def test_wait_for_image_copied_to_stores(self):
+        self.client.show_image.return_value = ({
+            'status': 'active',
+            'os_glance_importing_to_stores': '',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        start_time = int(time.time())
+        waiters.wait_for_image_copied_to_stores(
+            self.client, 'fake_image_id')
+        end_time = int(time.time())
+        # Ensure waiter returns before build_timeout
+        self.assertLess((end_time - start_time), 10)
+
+    def test_wait_for_image_copied_to_stores_timeout(self):
+        time_mock = self.patch('time.time')
+        self.patch('time.time', side_effect=[0., 1.])
+        time_mock.side_effect = utils.generate_timeout_series(1)
+
+        self.client.show_image.return_value = ({
+            'status': 'active',
+            'os_glance_importing_to_stores': 'processing',
+            'os_glance_failed_import': 'fake_os_glance_failed_import'})
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_image_copied_to_stores,
+                          self.client, 'fake_image_id')
+
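These tests pin down a simple polling contract: keep calling show_image until the requested store appears (or the copy finishes), and raise a timeout error once build_timeout elapses. A generic, hedged sketch of such a poll loop; the names and the built-in TimeoutError are illustrative, the real waiters live in tempest.common.waiters:

    import time

    def wait_until(check, timeout, interval=1.0):
        # Poll `check` until it returns True or `timeout` seconds elapse.
        start = time.time()
        while not check():
            if time.time() - start > timeout:
                raise TimeoutError('condition not met within %ss' % timeout)
            time.sleep(interval)

    # e.g. wait_until(
    #     lambda: 'fake_store' in client.show_image(image_id)['stores'],
    #     timeout=client.build_timeout)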
 
 class TestInterfaceWaiters(base.TestCase):
 
@@ -131,6 +181,36 @@
                                           mock.call('server_id')])
         sleep.assert_called_once_with(client.build_interval)
 
+    def test_wait_for_guest_os_boot(self):
+        get_console_output = mock.Mock(
+            side_effect=[
+                {'output': 'os not ready yet\n'},
+                {'output': 'login:\n'}
+            ])
+        client = self.mock_client(get_console_output=get_console_output)
+        self.patch('time.time', return_value=0.)
+        sleep = self.patch('time.sleep')
+
+        with mock.patch.object(waiters.LOG, "info") as log_info:
+            waiters.wait_for_guest_os_boot(client, 'server_id')
+
+        get_console_output.assert_has_calls([
+            mock.call('server_id'), mock.call('server_id')])
+        sleep.assert_called_once_with(client.build_interval)
+        log_info.assert_not_called()
+
+    def test_wait_for_guest_os_boot_timeout(self):
+        get_console_output = mock.Mock(
+            return_value={'output': 'os not ready yet\n'})
+        client = self.mock_client(get_console_output=get_console_output)
+        self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+        self.patch('time.sleep')
+
+        with mock.patch.object(waiters.LOG, "info") as log_info:
+            waiters.wait_for_guest_os_boot(client, 'server_id')
+
+        log_info.assert_called_once()
+
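The two tests above fix the assumed behaviour of wait_for_guest_os_boot: return as soon as a boot marker such as 'login:' shows up in the console output, and merely log (rather than raise) when the timeout is hit. A standalone sketch of that loop under those assumptions:

    import time

    def wait_for_login_prompt(get_console_output, build_timeout,
                              build_interval, log=print):
        start = time.time()
        while time.time() - start <= build_timeout:
            if 'login:' in get_console_output()['output']:
                return                       # guest OS reported a prompt
            time.sleep(build_interval)
        log('guest OS did not show a login prompt before the timeout')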
 
 class TestVolumeWaiters(base.TestCase):
     vol_migrating_src_host = {
diff --git a/tempest/tests/lib/common/utils/test_test_utils.py b/tempest/tests/lib/common/utils/test_test_utils.py
index bdc0ea4..d8e3745 100644
--- a/tempest/tests/lib/common/utils/test_test_utils.py
+++ b/tempest/tests/lib/common/utils/test_test_utils.py
@@ -74,6 +74,17 @@
         self.assertRaises(ValueError, test_utils.call_and_ignore_notfound_exc,
                           raise_value_error)
 
+    def test_call_and_ignore_notfound_exc_when_serverfault_raised(self):
+        calls = []
+
+        def raise_serverfault():
+            calls.append('call')
+            raise exceptions.ServerFault()
+        self.assertRaises(exceptions.ServerFault,
+                          test_utils.call_and_ignore_notfound_exc,
+                          raise_serverfault)
+        self.assertEqual(3, len(calls))
+
     def test_call_and_ignore_notfound_exc(self):
         m = mock.Mock(return_value=42)
         args, kwargs = (1,), {'1': None}
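The new test asserts a retry contract: when the callable keeps raising ServerFault, the helper retries and the callable ends up invoked three times before the fault escapes, while NotFound is still swallowed. A self-contained sketch of that contract, with placeholder exception classes standing in for tempest.lib.exceptions:

    class NotFound(Exception):
        pass

    class ServerFault(Exception):
        pass

    def call_and_ignore_notfound_sketch(func, *args, attempts=3, **kwargs):
        for attempt in range(attempts):
            try:
                return func(*args, **kwargs)
            except NotFound:
                return None                  # a missing resource is fine
            except ServerFault:
                if attempt == attempts - 1:  # give up after the retries
                    raise

    calls = []
    def always_faulting():
        calls.append('call')
        raise ServerFault()

    try:
        call_and_ignore_notfound_sketch(always_faulting)
    except ServerFault:
        pass
    print(len(calls))   # -> 3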
diff --git a/tempest/tests/lib/services/identity/v3/test_endpoints_client.py b/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
index ca15dd1..0efc462 100644
--- a/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
@@ -54,12 +54,44 @@
     }
 
     FAKE_SERVICE_ID = "a4dc5060-f757-4662-b658-edd2aefbb41d"
+    FAKE_ENDPOINT_ID = "b335d394-cdb9-4519-b95d-160b7706e54ew"
+
+    FAKE_UPDATE_ENDPOINT = {
+        "endpoint": {
+            "id": "828384",
+            "interface": "internal",
+            "links": {
+                "self": "http://example.com/identity/v3/"
+                        "endpoints/828384"
+            },
+            "region_id": "north",
+            "service_id": "686766",
+            "url": "http://example.com/identity/v3/"
+                   "endpoints/828384"
+        }
+    }
+
+    FAKE_SHOW_ENDPOINT = {
+        "endpoint": {
+            "enabled": True,
+            "id": "01c3d5b92f7841ac83fb4b26173c12c7",
+            "interface": "admin",
+            "links": {
+                "self": "http://example.com/identity/v3/"
+                        "endpoints/828384"
+            },
+            "region": "RegionOne",
+            "region_id": "RegionOne",
+            "service_id": "3b2d6ad7e02c4cde8498a547601f1b8f",
+            "url": "http://23.253.211.234:9696/"
+        }
+    }
 
     def setUp(self):
         super(TestEndpointsClient, self).setUp()
         fake_auth = fake_auth_provider.FakeAuthProvider()
-        self.client = endpoints_client.EndPointsClient(fake_auth,
-                                                       'identity', 'regionOne')
+        self.client = endpoints_client.EndPointsClient(
+            fake_auth, 'identity', 'regionOne')
 
     def _test_create_endpoint(self, bytes_body=False):
         self.check_service_client_function(
@@ -84,6 +116,38 @@
             mock_args=[mock_args],
             **params)
 
+    def _test_update_endpoint(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.update_endpoint,
+            'tempest.lib.common.rest_client.RestClient.patch',
+            self.FAKE_UPDATE_ENDPOINT,
+            bytes_body,
+            endpoint_id=self.FAKE_ENDPOINT_ID,
+            interface="public",
+            region_id="north",
+            url="http://example.com/identity/v3/endpoints/828384",
+            service_id=self.FAKE_SERVICE_ID)
+
+    def _test_show_endpoint(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_endpoint,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_SHOW_ENDPOINT,
+            bytes_body,
+            endpoint_id="3456")
+
+    def test_update_endpoint_with_str_body(self):
+        self._test_update_endpoint()
+
+    def test_update_endpoint_with_bytes_body(self):
+        self._test_update_endpoint(bytes_body=True)
+
+    def test_show_endpoint_with_str_body(self):
+        self._test_show_endpoint()
+
+    def test_show_endpoint_with_bytes_body(self):
+        self._test_show_endpoint(bytes_body=True)
+
     def test_create_endpoint_with_str_body(self):
         self._test_create_endpoint()
 
diff --git a/tempest/tests/lib/services/network/test_floating_ips_client.py b/tempest/tests/lib/services/network/test_floating_ips_client.py
index c5b1845..e8f2e5a 100644
--- a/tempest/tests/lib/services/network/test_floating_ips_client.py
+++ b/tempest/tests/lib/services/network/test_floating_ips_client.py
@@ -27,6 +27,8 @@
             {
                 "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
                 "description": "for test",
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myfip",
                 "created_at": "2016-12-21T10:55:50Z",
                 "updated_at": "2016-12-21T10:55:53Z",
                 "revision_number": 1,
@@ -37,11 +39,24 @@
                 "floating_ip_address": "172.24.4.228",
                 "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
                 "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
-                "status": "ACTIVE"
+                "status": "ACTIVE",
+                "port_details": {
+                    "status": "ACTIVE",
+                    "name": "",
+                    "admin_state_up": True,
+                    "network_id": "02dd8479-ef26-4398-a102-d19d0a7b3a1f",
+                    "device_owner": "compute:nova",
+                    "mac_address": "fa:16:3e:b1:3b:30",
+                    "device_id": "8e3941b4-a6e9-499f-a1ac-2a4662025cba"
+                },
+                "tags": ["tag1,tag2"],
+                "port_forwardings": []
             },
             {
                 "router_id": None,
                 "description": "for test",
+                "dns_domain": "my-domain.org.",
+                "dns_name": "myfip2",
                 "created_at": "2016-12-21T11:55:50Z",
                 "updated_at": "2016-12-21T11:55:53Z",
                 "revision_number": 2,
@@ -52,7 +67,10 @@
                 "floating_ip_address": "172.24.4.227",
                 "port_id": None,
                 "id": "61cea855-49cb-4846-997d-801b70c71bdd",
-                "status": "DOWN"
+                "status": "DOWN",
+                "port_details": None,
+                "tags": ["tag1,tag2"],
+                "port_forwardings": []
             }
         ]
     }
diff --git a/tempest/tests/lib/services/network/test_networks_client.py b/tempest/tests/lib/services/network/test_networks_client.py
index 078f4b0..17233bc 100644
--- a/tempest/tests/lib/services/network/test_networks_client.py
+++ b/tempest/tests/lib/services/network/test_networks_client.py
@@ -31,12 +31,17 @@
                     "nova"
                 ],
                 "created_at": "2016-03-08T20:19:41",
+                "dns_domain": "my-domain.org.",
                 "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+                "ipv4_address_scope": None,
+                "ipv6_address_scope": None,
+                "l2_adjacency": False,
                 "mtu": 0,
                 "name": "net1",
                 "port_security_enabled": True,
                 "project_id": "4fd44f30292945e481c7b8a0c8908869",
                 "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+                "revision_number": 1,
                 "router:external": False,
                 "shared": False,
                 "status": "ACTIVE",
@@ -46,7 +51,8 @@
                 "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
                 "updated_at": "2016-03-08T20:19:41",
                 "vlan_transparent": True,
-                "description": ""
+                "description": "",
+                "is_default": False
             },
             {
                 "admin_state_up": True,
@@ -54,12 +60,18 @@
                 "availability_zones": [
                     "nova"
                 ],
+                "created_at": "2016-03-08T20:19:41",
+                "dns_domain": "my-domain.org.",
                 "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+                "ipv4_address_scope": None,
+                "ipv6_address_scope": None,
+                "l2_adjacency": False,
                 "mtu": 0,
                 "name": "net2",
                 "port_security_enabled": True,
                 "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                 "qos_policy_id": "bfdb6c39f71e4d44b1dfbda245c50819",
+                "revision_number": 3,
                 "router:external": False,
                 "shared": False,
                 "status": "ACTIVE",
@@ -69,7 +81,8 @@
                 "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                 "updated_at": "2016-03-08T20:19:41",
                 "vlan_transparent": False,
-                "description": ""
+                "description": "",
+                "is_default": False
             }
         ]
     }
@@ -108,6 +121,7 @@
                 "alive": True,
                 "topic": "dhcp_agent",
                 "host": "osboxes",
+                "ha_state": None,
                 "agent_type": "DHCP agent",
                 "resource_versions": {},
                 "created_at": "2017-06-19 21:39:51",
diff --git a/tempest/tests/lib/services/placement/test_resource_providers_client.py b/tempest/tests/lib/services/placement/test_resource_providers_client.py
new file mode 100644
index 0000000..11aeaf2
--- /dev/null
+++ b/tempest/tests/lib/services/placement/test_resource_providers_client.py
@@ -0,0 +1,119 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.placement import resource_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestResourceProvidersClient(base.BaseServiceTest):
+    FAKE_RESOURCE_PROVIDER_UUID = '3722a86e-a563-11e9-9abb-c3d41b6d3abf'
+    FAKE_ROOT_PROVIDER_UUID = '4a6a57c8-a563-11e9-914e-f3e0478fce53'
+    FAKE_RESOURCE_PROVIDER = {
+        'generation': 0,
+        'name': 'Ceph Storage Pool',
+        'uuid': FAKE_RESOURCE_PROVIDER_UUID,
+        'parent_provider_uuid': FAKE_ROOT_PROVIDER_UUID,
+        'root_provider_uuid': FAKE_ROOT_PROVIDER_UUID
+    }
+
+    FAKE_RESOURCE_PROVIDERS = {
+        'resource_providers': [FAKE_RESOURCE_PROVIDER]
+    }
+
+    FAKE_RESOURCE_PROVIDER_INVENTORIES = {
+        'inventories': {
+            'DISK_GB': {
+                'allocation_ratio': 1.0,
+                'max_unit': 35,
+                'min_unit': 1,
+                'reserved': 0,
+                'step_size': 1,
+                'total': 35
+            }
+        },
+        'resource_provider_generation': 7
+    }
+
+    FAKE_AGGREGATE_UUID = '1166be40-a567-11e9-9f2a-53827f9311fa'
+    FAKE_RESOURCE_PROVIDER_AGGREGATES = {
+        'aggregates': [FAKE_AGGREGATE_UUID]
+    }
+
+    def setUp(self):
+        super(TestResourceProvidersClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = resource_providers_client.ResourceProvidersClient(
+            fake_auth, 'placement', 'regionOne')
+
+    def _test_list_resource_providers(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_providers,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDERS,
+            to_utf=bytes_body,
+            status=200
+        )
+
+    def test_list_resource_providers_with_bytes_body(self):
+        self._test_list_resource_providers()
+
+    def test_list_resource_providers_with_str_body(self):
+        self._test_list_resource_providers(bytes_body=True)
+
+    def _test_show_resource_provider(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_resource_provider,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_show_resource_provider_with_str_body(self):
+        self._test_show_resource_provider()
+
+    def test_show_resource_provider_with_bytes_body(self):
+        self._test_show_resource_provider(bytes_body=True)
+
+    def _test_list_resource_provider_inventories(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_provider_inventories,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER_INVENTORIES,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_list_resource_provider_inventories_with_str_body(self):
+        self._test_list_resource_provider_inventories()
+
+    def test_list_resource_provider_inventories_with_bytes_body(self):
+        self._test_list_resource_provider_inventories(bytes_body=True)
+
+    def _test_list_resource_provider_aggregates(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.list_resource_provider_aggregates,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_RESOURCE_PROVIDER_AGGREGATES,
+            to_utf=bytes_body,
+            status=200,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+        )
+
+    def test_list_resource_provider_aggregates_with_str_body(self):
+        self._test_list_resource_provider_aggregates()
+
+    def test_list_resource_provider_aggregates_with_bytes_body(self):
+        self._test_list_resource_provider_aggregates(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v3/test_types_client.py b/tempest/tests/lib/services/volume/v3/test_types_client.py
index 336aa32..19d6591 100644
--- a/tempest/tests/lib/services/volume/v3/test_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_types_client.py
@@ -121,6 +121,13 @@
             to_utf=bytes_body,
             volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff")
 
+    def _test_show_default_volume_type(self, bytes_body=False):
+        self.check_service_client_function(
+            self.client.show_default_volume_type,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_DEFAULT_VOLUME_TYPE_INFO,
+            to_utf=bytes_body)
+
     def _test_create_volume_type(self, bytes_body=False):
         self.check_service_client_function(
             self.client.create_volume_type,
@@ -224,6 +231,12 @@
     def test_show_volume_type_with_bytes_body(self):
         self._test_show_volume_type(bytes_body=True)
 
+    def test_show_default_volume_type_with_str_body(self):
+        self._test_show_default_volume_type()
+
+    def test_show_default_volume_type_with_bytes_body(self):
+        self._test_show_default_volume_type(bytes_body=True)
+
     def test_create_volume_type_str_body(self):
         self._test_create_volume_type()
 
diff --git a/tools/tempest-integrated-gate-networking-blacklist.txt b/tools/tempest-integrated-gate-networking-blacklist.txt
index 97808d9..263b2e4 100644
--- a/tools/tempest-integrated-gate-networking-blacklist.txt
+++ b/tools/tempest-integrated-gate-networking-blacklist.txt
@@ -17,8 +17,3 @@
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_image_defined_boot_from_volume
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_server_from_volume_snapshot
-
-# TODO(gmann): Remove these test skip once nova bug #1882521 is solved
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume
-tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON
-tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached
diff --git a/tools/tempest-integrated-gate-placement-blacklist.txt b/tools/tempest-integrated-gate-placement-blacklist.txt
index 657bda2..efba796 100644
--- a/tools/tempest-integrated-gate-placement-blacklist.txt
+++ b/tools/tempest-integrated-gate-placement-blacklist.txt
@@ -17,8 +17,3 @@
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_image_defined_boot_from_volume
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_server_from_volume_snapshot
-
-# TODO(gmann): Remove these test skip once nova bug #1882521 is solved
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume
-tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON
-tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-blacklist.txt
index cbd3e9d..1ef6bb5 100644
--- a/tools/tempest-integrated-gate-storage-blacklist.txt
+++ b/tools/tempest-integrated-gate-storage-blacklist.txt
@@ -12,8 +12,3 @@
 tempest.scenario.test_network_basic_ops.TestNetworkBasicOps
 tempest.scenario.test_network_v6.TestGettingAddress
 tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps
-
-# TODO(gmann): Remove these test skip once nova bug #1882521 is solved
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume
-tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON
-tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached
diff --git a/tox.ini b/tox.ini
index 031a400..d8e059a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,7 +23,7 @@
     OS_STDERR_CAPTURE=1
     OS_TEST_TIMEOUT=160
     PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST GABBI_TEMPEST_PATH
+passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
 usedevelop = True
 install_command = pip install {opts} {packages}
 whitelist_externals = *
@@ -279,7 +279,6 @@
 [testenv:docs]
 deps =
   -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/requirements.txt
   -r{toxinidir}/doc/requirements.txt
 commands =
   sphinx-apidoc -f -o doc/source/tests/compute tempest/api/compute
@@ -365,7 +364,6 @@
 [testenv:releasenotes]
 deps =
   -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-  -r{toxinidir}/requirements.txt
   -r{toxinidir}/doc/requirements.txt
 commands =
   rm -rf releasenotes/build
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
new file mode 100644
index 0000000..3deb944
--- /dev/null
+++ b/zuul.d/base.yaml
@@ -0,0 +1,86 @@
+- job:
+    name: devstack-tempest
+    parent: devstack
+    description: |
+      Base Tempest job.
+
+      This Tempest job provides the base for both the single and multi-node
+      test setup. To run a multi-node test inherit from devstack-tempest and
+      set the nodeset to a multi-node one.
+    required-projects: &base_required-projects
+      - opendev.org/openstack/tempest
+    timeout: 7200
+    roles: &base_roles
+      - zuul: opendev.org/openstack/devstack
+    vars: &base_vars
+      devstack_services:
+        tempest: true
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            compute:
+              min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
+      test_results_stage_name: test_results
+      zuul_copy_output:
+        '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
+        '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
+        '{{ devstack_base_dir }}/tempest/tempest.log': logs
+        '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
+        '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
+        '{{ stage_dir }}/stackviz': logs
+      extensions_to_txt:
+        conf: true
+        log: true
+        yaml: true
+        yml: true
+    run: playbooks/devstack-tempest.yaml
+    post-run: playbooks/post-tempest.yaml
+
+- job:
+    name: devstack-tempest-ipv6
+    parent: devstack-ipv6
+    description: |
+      Base Tempest IPv6 job. This job is derived from 'devstack-ipv6'
+      which sets the IPv6-only settings for OpenStack services. As part of
+      the run phase, this job will verify the IPv6 settings and check that
+      the service endpoints and listen addresses are IPv6. Basically it will
+      run the script ./tool/verify-ipv6-only-deployments.sh
+
+      Child jobs of this job can run their own set of tests and can
+      add post-run playbooks to extend the IPv6 verification specific
+      to their deployed services.
+      Check the wiki page for more details about project jobs setup
+      - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
+    required-projects: *base_required-projects
+    timeout: 7200
+    roles: *base_roles
+    vars: *base_vars
+    run: playbooks/devstack-tempest-ipv6.yaml
+    post-run: playbooks/post-tempest.yaml
+
+- job:
+    name: tempest-multinode-full-base
+    parent: devstack-tempest
+    description: |
+      Base multinode integration test with Neutron networking and py27.
+      Former names for this job were:
+        * neutron-tempest-multinode-full
+        * legacy-tempest-dsvm-neutron-multinode-full
+        * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
+      This job includes two nodes, controller / tempest plus a subnode, but
+      it can be used with different topologies, as long as a controller node
+      and a tempest one exist.
+    timeout: 10800
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        FORCE_CONFIG_DRIVE: false
+        NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+        LIVE_MIGRATION_AVAILABLE: true
+        USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+    group-vars:
+      peers:
+        devstack_localrc:
+          NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+          LIVE_MIGRATION_AVAILABLE: true
+          USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
new file mode 100644
index 0000000..1a1d523
--- /dev/null
+++ b/zuul.d/integrated-gate.yaml
@@ -0,0 +1,412 @@
+# NOTE(gmann): This file includes all integrated jobs definition which
+# are supposed to be run by Tempest and other projects as
+# integrated testing.
+- job:
+    name: tempest-all
+    parent: devstack-tempest
+    description: |
+      Integration test that runs all tests.
+      Former name for this job was:
+        * legacy-periodic-tempest-dsvm-all-master
+    vars:
+      tox_envlist: all
+      tempest_test_regex: tempest
+      devstack_localrc:
+        ENABLE_FILE_INJECTION: true
+
+- job:
+    name: tempest-ipv6-only
+    parent: devstack-tempest-ipv6
+    # This currently works from stable/pike on.
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Integration test of IPv6-only deployments. This job runs
+      smoke and IPv6-related tests only. The basic idea is to test
+      whether OpenStack services listen on IPv6 addresses or not.
+    timeout: 10800
+    vars:
+      tox_envlist: ipv6-only
+
+- job:
+    name: tempest-full
+    parent: devstack-tempest
+    # This currently works from stable/pike on.
+    # Before stable/pike, legacy version of tempest-full
+    # 'legacy-tempest-dsvm-neutron-full' run.
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Base integration test with Neutron networking and py27.
+      This job is only meant to run on setups up to stable/train.
+      If you are running it on stable/ussuri gates or later for python2.7
+      coverage, you need to use override-checkout with a stable branch
+      less than or equal to stable/train.
+      Former names for this job were:
+        * legacy-tempest-dsvm-neutron-full
+        * gate-tempest-dsvm-neutron-full-ubuntu-xenial
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        ENABLE_FILE_INJECTION: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        USE_PYTHON3: False
+      devstack_services:
+        # NOTE(mriedem): Disable the cinder-backup service from tempest-full
+        # since tempest-full is in the integrated-gate project template but
+        # the backup tests do not really involve other services so they should
+        # be run in some more cinder-specific job, especially because the
+        # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
+        c-bak: false
+
+- job:
+    name: tempest-full-py3
+    parent: devstack-tempest
+    # This currently works from stable/pike on.
+    # Before stable/pike, legacy version of tempest-full
+    # 'legacy-tempest-dsvm-neutron-full' run.
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Base integration test with Neutron networking and py3.
+      Former names for this job were:
+        * legacy-tempest-dsvm-py35
+        * gate-tempest-dsvm-py35
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        # NOTE(mriedem): Disable the cinder-backup service from
+        # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+        # project template but the backup tests do not really involve other
+        # services so they should be run in some more cinder-specific job,
+        # especially because the tests fail at a high rate (see bugs 1483434,
+        # 1813217, 1745168)
+        c-bak: false
+
+- job:
+    name: tempest-integrated-networking
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for networking. This is a subset of
+      the 'tempest-full-py3' job and runs only Neutron and Nova related
+      tests. It is meant to be run on the neutron gate only.
+    vars:
+      tox_envlist: integrated-network
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-compute
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for compute. This is a
+      subset of the 'tempest-full-py3' job and runs Nova, Neutron, Cinder
+      (except backup tests) and Glance related tests. It is meant to be
+      run on the Nova gate only.
+    vars:
+      tox_envlist: integrated-compute
+      tempest_black_regex: ""
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-placement
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for placement. This is a
+      subset of the 'tempest-full-py3' job and runs Nova and Neutron
+      related tests. It is meant to be run on the Placement gate only.
+    vars:
+      tox_envlist: integrated-placement
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        c-bak: false
+
+- job:
+    name: tempest-integrated-storage
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for image & block storage. This is a
+      subset of the 'tempest-full-py3' job and runs Cinder, Glance, Swift
+      and Nova related tests. It is meant to be run on the Cinder and
+      Glance gates only.
+    vars:
+      tox_envlist: integrated-storage
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+
+- job:
+    name: tempest-integrated-object-storage
+    parent: devstack-tempest
+    branches: ^(?!stable/ocata).*$
+    description: |
+      This job runs integration tests for object storage. This is a
+      subset of the 'tempest-full-py3' job and runs Swift, Cinder and Glance
+      related tests. It is meant to be run on the Swift gate only.
+    vars:
+      tox_envlist: integrated-object-storage
+      devstack_localrc:
+        # NOTE(gmann): swift is not ready on python3 yet and devstack
+        # installs it on python2.7 only. But setting USE_PYTHON3 here
+        # for the future, once swift is ready on py3.
+        USE_PYTHON3: true
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-focal
+    # This job runs on Focal from stable/victoria on.
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-bionic
+    # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
+    # This job is prepared to make sure all stable branches from stable/stein till stable/train
+    # will keep running on bionic. This can be removed once stable/train is EOL.
+    branches:
+      - stable/stein
+      - stable/train
+      - stable/ussuri
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-xenial
+    # This job runs on Xenial and this is for stable/pike, stable/queens
+    # and stable/rocky. This job is prepared to make sure all stable branches
+    # before stable/stein will keep running on xenial. This job can be
+    # removed once stable/rocky is EOL.
+    branches:
+      - stable/pike
+      - stable/queens
+      - stable/rocky
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
+    name: tempest-multinode-full-py3
+    parent: tempest-multinode-full
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+
+- job:
+    name: tempest-slow
+    parent: tempest-multinode-full
+    description: |
+      This multinode integration job will run all the tests tagged as slow.
+      It enables the lvm multibackend setup to cover a few scenario tests.
+      This job will run only slow tests (API or Scenario) serially.
+
+      Former names for this job were:
+        * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
+        * tempest-scenario-multinode-lvm-multibackend
+    timeout: 10800
+    vars:
+      tox_envlist: slow-serial
+      devstack_localrc:
+        CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+        ENABLE_VOLUME_MULTIATTACH: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-placement: true
+        neutron-qos: true
+      devstack_local_conf:
+        post-config:
+          "/$NEUTRON_CORE_PLUGIN_CONF":
+            ovs:
+              bridge_mappings: public:br-ex
+              resource_provider_bandwidths: br-ex:1000000:1000000
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              qos_placement_physnet: public
+      tempest_concurrency: 2
+    group-vars:
+      # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+      # the controller and subnode prior to Rocky so we have to make sure the
+      # variable is set in both locations.
+      subnode:
+        devstack_localrc:
+          ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+    name: tempest-slow-py3
+    parent: tempest-slow
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        c-bak: false
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+
+- job:
+    name: tempest-cinder-v2-api
+    parent: devstack-tempest
+    branches:
+      - master
+    description: |
+      This job runs the cinder API test against v2 endpoint.
+    vars:
+      tox_envlist: all
+      tempest_test_regex: api.*volume
+      devstack_localrc:
+        TEMPEST_VOLUME_TYPE: volumev2
+
+- job:
+    name: tempest-pg-full
+    parent: tempest-full-py3
+    description: |
+      Base integration test with Neutron networking and PostgreSQL.
+      Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
+    vars:
+      devstack_localrc:
+        ENABLE_FILE_INJECTION: true
+        DATABASE_TYPE: postgresql
+
+- project-template:
+    name: integrated-gate-networking
+    description: |
+      Run the python3 Tempest network integration tests (Nova and Neutron related)
+      in check and gate for the neutron integrated gate. This is meant to be
+      run on neutron gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-networking
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-networking
+
+- project-template:
+    name: integrated-gate-compute
+    description: |
+      Run the python3 Tempest compute integration tests
+      (Nova, Neutron, Cinder and Glance related) in check and gate
+      for the Nova integrated gate. This is meant to be
+      run on Nova gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-compute
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-compute
+
+- project-template:
+    name: integrated-gate-placement
+    description: |
+      Run the python3 Tempest placement integration tests
+      (Nova and Neutron related) in check and gate
+      for the Placement integrated gate. This is meant to be
+      run on Placement gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-placement
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-placement
+
+- project-template:
+    name: integrated-gate-storage
+    description: |
+      Run the python3 Tempest image & block storage integration tests
+      (Cinder, Glance, Swift and Nova related) in check and gate
+      for the neutron integrated gate. This is meant to be
+      run on Cinder and Glance gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-storage
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-storage
+
+- project-template:
+    name: integrated-gate-object-storage
+    description: |
+      Run the python3 Tempest object storage integration tests
+      (Swift, Cinder and Glance related) in check and gate
+      for the swift integrated gate. This is meant to be
+      run on swift gate only.
+    check:
+      jobs:
+        - grenade
+        - tempest-integrated-object-storage
+    gate:
+      jobs:
+        - grenade
+        - tempest-integrated-object-storage
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
new file mode 100644
index 0000000..f2522af
--- /dev/null
+++ b/zuul.d/project.yaml
@@ -0,0 +1,146 @@
+- project:
+    templates:
+      - check-requirements
+      - integrated-gate-py3
+      - openstack-cover-jobs
+      - openstack-python3-victoria-jobs
+      - publish-openstack-docs-pti
+      - release-notes-jobs-python3
+    check:
+      jobs:
+        - devstack-tempest:
+            files:
+              - ^playbooks/
+              - ^roles/
+              - ^.zuul.yaml$
+        - devstack-tempest-ipv6:
+            voting: false
+            files:
+              - ^playbooks/
+              - ^roles/
+              - ^.zuul.yaml$
+        - tempest-full-parallel:
+            # Define list of irrelevant files to use everywhere else
+            irrelevant-files: &tempest-irrelevant-files
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^tools/.*$
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
+        - tempest-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-ipv6:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - glance-multistore-cinder-import:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-victoria-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-ussuri-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-train-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-stein-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-multinode-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-tox-plugin-sanity-check:
+            irrelevant-files: &tempest-irrelevant-files-2
+              - ^.*\.rst$
+              - ^doc/.*$
+              - ^etc/.*$
+              - ^releasenotes/.*$
+              - ^setup.cfg$
+              - ^tempest/hacking/.*$
+              - ^tempest/tests/.*$
+              - ^.coveragerc$
+              - ^.gitignore$
+              - ^.gitreview$
+              - ^.mailmap$
+              # tools/ is not here since this relies on a script in tools/.
+        - tempest-ipv6-only:
+            irrelevant-files: *tempest-irrelevant-files-2
+        - tempest-slow-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - nova-live-migration:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-grenade-multinode:
+            irrelevant-files: *tempest-irrelevant-files
+        - grenade:
+            irrelevant-files: *tempest-irrelevant-files
+        - puppet-openstack-integration-4-scenario001-tempest-centos-7:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - puppet-openstack-integration-4-scenario002-tempest-centos-7:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - puppet-openstack-integration-4-scenario003-tempest-centos-7:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - puppet-openstack-integration-4-scenario004-tempest-centos-7:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-tempest-dvr:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - interop-tempest-consistency:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-test-account-py3:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-test-account-no-admin-py3:
+            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - openstack-tox-bashate:
+            irrelevant-files: *tempest-irrelevant-files-2
+    gate:
+      jobs:
+        - tempest-slow-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-grenade-multinode:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - grenade:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-ipv6-only:
+            irrelevant-files: *tempest-irrelevant-files-2
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files: *tempest-irrelevant-files
+    experimental:
+      jobs:
+        - tempest-cinder-v2-api:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-all:
+            irrelevant-files: *tempest-irrelevant-files
+        - neutron-tempest-dvr-ha-multinode-full:
+            irrelevant-files: *tempest-irrelevant-files
+        - nova-tempest-v2-api:
+            irrelevant-files: *tempest-irrelevant-files
+        - cinder-tempest-lvm-multibackend:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-pg-full:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-py3-opensuse15:
+            irrelevant-files: *tempest-irrelevant-files
+    periodic-stable:
+      jobs:
+        - tempest-full-victoria-py3
+        - tempest-full-ussuri-py3
+        - tempest-full-train-py3
+        - tempest-full-stein-py3
+    periodic:
+      jobs:
+        - tempest-all
+        - tempest-full-oslo-master
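+
+# NOTE: the *tempest-irrelevant-files / *tempest-irrelevant-files-2
+# references in the pipelines above are plain YAML aliases pointing back
+# at the &tempest-irrelevant-files / &tempest-irrelevant-files-2 anchors,
+# so every job shares a single file-filter list. A minimal sketch of the
+# pattern (job and anchor names below are illustrative only):
+#
+#   - some-job:
+#       irrelevant-files: &my-filter
+#         - ^doc/.*$
+#   - another-job:
+#       irrelevant-files: *my-filter
+#
+# The aliases are resolved by the YAML loader, so editing the anchored
+# list updates every job that references it.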
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
new file mode 100644
index 0000000..832a0d5
--- /dev/null
+++ b/zuul.d/stable-jobs.yaml
@@ -0,0 +1,23 @@
+# NOTE(gmann): This file includes all stable release job definitions.
+- job:
+    name: tempest-full-victoria-py3
+    parent: tempest-full-py3
+    override-checkout: stable/victoria
+
+- job:
+    name: tempest-full-ussuri-py3
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-bionic
+    override-checkout: stable/ussuri
+
+- job:
+    name: tempest-full-train-py3
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-bionic
+    override-checkout: stable/train
+
+- job:
+    name: tempest-full-stein-py3
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-bionic
+    override-checkout: stable/stein
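+
+# NOTE: the jobs above follow one pattern -- pin the branch with
+# override-checkout and, where that branch was tested on an older Ubuntu,
+# pin the matching nodeset too (bionic for ussuri/train/stein, while
+# victoria inherits the parent's default node). A sketch of what a future
+# stable job would presumably look like (placeholder names only):
+#
+#   - job:
+#       name: tempest-full-<release>-py3
+#       parent: tempest-full-py3
+#       override-checkout: stable/<release>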
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
new file mode 100644
index 0000000..387a94b
--- /dev/null
+++ b/zuul.d/tempest-specific.yaml
@@ -0,0 +1,113 @@
+# NOTE(gmann): This file includes all Tempest-specific job definitions,
+# which are supposed to be run by the Tempest gate only.
+- job:
+    name: tempest-full-oslo-master
+    parent: tempest-full-py3
+    description: |
+      Integration test using current git of oslo libs.
+      This ensures that when oslo libs get released they do not
+      break OpenStack server projects.
+
+      Former name for this job was
+      periodic-tempest-dsvm-oslo-latest-full-master.
+    timeout: 10800
+    required-projects:
+      - opendev.org/openstack/oslo.cache
+      - opendev.org/openstack/oslo.concurrency
+      - opendev.org/openstack/oslo.config
+      - opendev.org/openstack/oslo.context
+      - opendev.org/openstack/oslo.db
+      - opendev.org/openstack/oslo.i18n
+      - opendev.org/openstack/oslo.log
+      - opendev.org/openstack/oslo.messaging
+      - opendev.org/openstack/oslo.middleware
+      - opendev.org/openstack/oslo.policy
+      - opendev.org/openstack/oslo.privsep
+      - opendev.org/openstack/oslo.reports
+      - opendev.org/openstack/oslo.rootwrap
+      - opendev.org/openstack/oslo.serialization
+      - opendev.org/openstack/oslo.service
+      - opendev.org/openstack/oslo.utils
+      - opendev.org/openstack/oslo.versionedobjects
+      - opendev.org/openstack/oslo.vmware
+
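+# NOTE: listing the oslo repos under required-projects makes Zuul check
+# them out from source alongside tempest; the devstack-based parent job
+# then installs them from those checkouts rather than from released
+# packages -- presumably the mechanism behind "current git of oslo libs"
+# in the description. Treat the install detail as an assumption about the
+# devstack roles, not something this file itself enforces.
+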
+- job:
+    name: tempest-full-parallel
+    parent: tempest-full-py3
+    voting: false
+    branches:
+      - master
+    description: |
+      Base integration test with Neutron networking.
+      It includes all scenario tests, as the full job did in the past,
+      and runs them in parallel.
+    timeout: 9000
+    vars:
+      tox_envlist: full-parallel
+      run_tempest_cleanup: true
+      run_tempest_dry_cleanup: true
+
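+# NOTE: run_tempest_cleanup / run_tempest_dry_cleanup above are consumed
+# by the devstack-tempest playbook and, as I understand it, drive the
+# `tempest cleanup` tool: the dry variant only reports resources left
+# behind by the run, while the full cleanup removes them afterwards.
+# This is an assumption about the role's behaviour, not a guarantee.
+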
+- job:
+    name: tempest-full-py3-ipv6
+    parent: devstack-tempest-ipv6
+    branches: ^(?!stable/ocata).*$
+    description: |
+      Base integration test with Neutron networking, IPv6 and py3.
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+      devstack_services:
+        s-account: false
+        s-container: false
+        s-object: false
+        s-proxy: false
+        # without Swift, c-bak cannot run (in the Gate at least)
+        c-bak: false
+
+- job:
+    name: tempest-full-py3-opensuse15
+    parent: tempest-full-py3
+    nodeset: devstack-single-node-opensuse-15
+    description: |
+      Base integration test with Neutron networking and py36 running
+      on openSUSE Leap 15.x
+    voting: false
+
+- job:
+    name: tempest-tox-plugin-sanity-check
+    parent: tox
+    description: |
+      Run the tempest plugin sanity check script using tox.
+    nodeset: ubuntu-focal
+    vars:
+      tox_envlist: plugin-sanity-check
+    timeout: 5000
+
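+# NOTE: per the irrelevant-files comment in the check queue, this job
+# relies on a script under tools/ (run through the plugin-sanity-check
+# tox env) which, as far as I can tell, installs each registered tempest
+# plugin and verifies tempest still loads and lists tests with it
+# enabled -- hence tools/ is deliberately absent from that filter list.
+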
+- job:
+    name: tempest-full-test-account-py3
+    parent: tempest-full-py3
+    description: |
+      This job runs the full set of tempest tests under py3 using
+      pre-provisioned credentials instead of dynamic credentials.
+      Former names for this job were:
+        - legacy-tempest-dsvm-full-test-accounts
+        - legacy-tempest-dsvm-neutron-full-test-accounts
+        - legacy-tempest-dsvm-identity-v3-test-accounts
+    vars:
+      devstack_localrc:
+        TEMPEST_USE_TEST_ACCOUNTS: True
+
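+# NOTE: with TEMPEST_USE_TEST_ACCOUNTS enabled, devstack points tempest at
+# a pre-provisioned accounts file instead of letting it create credentials
+# dynamically. A minimal sketch of such a file (names and passwords are
+# purely illustrative):
+#
+#   - username: 'tempest-user-1'
+#     project_name: 'tempest-project-1'
+#     password: 'secretpass'
+#   - username: 'tempest-user-2'
+#     project_name: 'tempest-project-2'
+#     password: 'secretpass'
+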
+- job:
+    name: tempest-full-test-account-no-admin-py3
+    parent: tempest-full-test-account-py3
+    description: |
+      This job runs the full set of tempest tests under py3 using
+      pre-provisioned credentials, without an admin account.
+      Former name for this job was:
+        - legacy-tempest-dsvm-neutron-full-non-admin
+
+    vars:
+      devstack_localrc:
+        TEMPEST_HAS_ADMIN: False
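+
+# NOTE: with TEMPEST_HAS_ADMIN set to False, devstack presumably configures
+# tempest without admin credentials, so tests that require an admin account
+# are expected to skip; the job exercises the non-admin paths of the
+# pre-provisioned credential provider.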