Merge "compute: Skip AttachVolumeShelveTestJSON when cross_az_attach unavailable"
diff --git a/.zuul.yaml b/.zuul.yaml
deleted file mode 100644
index c20f204..0000000
--- a/.zuul.yaml
+++ /dev/null
@@ -1,778 +0,0 @@
-- job:
- name: devstack-tempest
- parent: devstack
- description: |
- Base Tempest job.
-
- This Tempest job provides the base for both the single and multi-node
- test setup. To run a multi-node test inherit from devstack-tempest and
- set the nodeset to a multi-node one.
- required-projects: &base_required-projects
- - opendev.org/openstack/tempest
- timeout: 7200
- roles: &base_roles
- - zuul: opendev.org/openstack/devstack
- vars: &base_vars
- devstack_services:
- tempest: true
- devstack_local_conf:
- test-config:
- $TEMPEST_CONFIG:
- compute:
- min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
- test_results_stage_name: test_results
- zuul_copy_output:
- '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
- '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
- '{{ devstack_base_dir }}/tempest/tempest.log': logs
- '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
- '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
- '{{ stage_dir }}/stackviz': logs
- extensions_to_txt:
- conf: true
- log: true
- yaml: true
- yml: true
- run: playbooks/devstack-tempest.yaml
- post-run: playbooks/post-tempest.yaml
-
-- job:
- name: tempest-all
- parent: devstack-tempest
- description: |
- Integration test that runs all tests.
- Former name for this job was:
- * legacy-periodic-tempest-dsvm-all-master
- vars:
- tox_envlist: all
- tempest_test_regex: tempest
- devstack_localrc:
- ENABLE_FILE_INJECTION: true
-
-- job:
- name: devstack-tempest-ipv6
- parent: devstack-ipv6
- description: |
- Base Tempest IPv6 job. This job is derived from 'devstack-ipv6'
-      which sets the IPv6-only setting for OpenStack services. As part of
-      the run phase, this job will verify the IPv6 setting and check that
-      the service endpoints and listen addresses are IPv6. Basically it
-      will run the script ./tools/verify-ipv6-only-deployments.sh
-
- Child jobs of this job can run their own set of tests and can
-      add post-run playbooks to extend the IPv6 verification specific
- to their deployed services.
- Check the wiki page for more details about project jobs setup
- - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
- required-projects: *base_required-projects
- timeout: 7200
- roles: *base_roles
- vars: *base_vars
- run: playbooks/devstack-tempest-ipv6.yaml
- post-run: playbooks/post-tempest.yaml
-
-- job:
- name: tempest-ipv6-only
- parent: devstack-tempest-ipv6
- # This currently works from stable/pike on.
- branches: ^(?!stable/ocata).*$
- description: |
- Integration test of IPv6-only deployments. This job runs
-      smoke and IPv6-related tests only. The basic idea is to test
-      whether OpenStack services listen on IPv6 addresses or not.
- timeout: 10800
- vars:
- tox_envlist: ipv6-only
-
-- job:
- name: tempest-full
- parent: devstack-tempest
- # This currently works from stable/pike on.
- # Before stable/pike, legacy version of tempest-full
- # 'legacy-tempest-dsvm-neutron-full' run.
- branches: ^(?!stable/ocata).*$
- description: |
- Base integration test with Neutron networking and py27.
-      This job is supposed to run on setups up to stable/train only.
-      If you are running it on the stable/ussuri gate onwards for
-      python2.7 coverage then you need to do override-checkout with a
-      stable branch less than or equal to stable/train.
-      Former names for this job were:
- * legacy-tempest-dsvm-neutron-full
- * gate-tempest-dsvm-neutron-full-ubuntu-xenial
- vars:
- tox_envlist: full
- devstack_localrc:
- ENABLE_FILE_INJECTION: true
- ENABLE_VOLUME_MULTIATTACH: true
- USE_PYTHON3: False
- devstack_services:
- # NOTE(mriedem): Disable the cinder-backup service from tempest-full
- # since tempest-full is in the integrated-gate project template but
- # the backup tests do not really involve other services so they should
- # be run in some more cinder-specific job, especially because the
- # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
- c-bak: false
-
-- job:
- name: tempest-full-oslo-master
- parent: tempest-full-py3
- description: |
- Integration test using current git of oslo libs.
-      This ensures that when oslo libs get released they
- do not break OpenStack server projects.
-
- Former name for this job was
- periodic-tempest-dsvm-oslo-latest-full-master.
- timeout: 10800
- required-projects:
- - opendev.org/openstack/oslo.cache
- - opendev.org/openstack/oslo.concurrency
- - opendev.org/openstack/oslo.config
- - opendev.org/openstack/oslo.context
- - opendev.org/openstack/oslo.db
- - opendev.org/openstack/oslo.i18n
- - opendev.org/openstack/oslo.log
- - opendev.org/openstack/oslo.messaging
- - opendev.org/openstack/oslo.middleware
- - opendev.org/openstack/oslo.policy
- - opendev.org/openstack/oslo.privsep
- - opendev.org/openstack/oslo.reports
- - opendev.org/openstack/oslo.rootwrap
- - opendev.org/openstack/oslo.serialization
- - opendev.org/openstack/oslo.service
- - opendev.org/openstack/oslo.utils
- - opendev.org/openstack/oslo.versionedobjects
- - opendev.org/openstack/oslo.vmware
-
-- job:
- name: tempest-full-parallel
- parent: tempest-full-py3
- voting: false
- branches:
- - master
- description: |
- Base integration test with Neutron networking.
-      It includes all scenario tests, as it did in the past.
- This job runs all scenario tests in parallel!
- timeout: 9000
- vars:
- tox_envlist: full-parallel
- run_tempest_cleanup: true
- run_tempest_dry_cleanup: true
-
-- job:
- name: tempest-full-py3
- parent: devstack-tempest
- # This currently works from stable/pike on.
- # Before stable/pike, legacy version of tempest-full
- # 'legacy-tempest-dsvm-neutron-full' run.
- branches: ^(?!stable/ocata).*$
- description: |
- Base integration test with Neutron networking and py3.
-      Former names for this job were:
- * legacy-tempest-dsvm-py35
- * gate-tempest-dsvm-py35
- vars:
- tox_envlist: full
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- GLANCE_USE_IMPORT_WORKFLOW: True
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- # without Swift, c-bak cannot run (in the Gate at least)
- # NOTE(mriedem): Disable the cinder-backup service from
- # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
- # project template but the backup tests do not really involve other
- # services so they should be run in some more cinder-specific job,
- # especially because the tests fail at a high rate (see bugs 1483434,
- # 1813217, 1745168)
- c-bak: false
-
-- job:
- name: tempest-integrated-networking
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
-      This job runs integration tests for networking. This is a subset of
-      the 'tempest-full-py3' job and runs only Neutron and Nova related
-      tests. This is meant to be run on the neutron gate only.
- vars:
- tox_envlist: integrated-network
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- c-bak: false
-
-- job:
- name: tempest-integrated-compute
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
-      This job runs integration tests for compute. This is a subset of the
-      'tempest-full-py3' job and runs Nova, Neutron, Cinder (except backup
-      tests) and Glance related tests. This is meant to be run on the Nova
-      gate only.
- vars:
- tox_envlist: integrated-compute
- tempest_black_regex: ""
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- c-bak: false
-
-- job:
- name: tempest-integrated-placement
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
-      This job runs integration tests for placement. This is a subset of
-      the 'tempest-full-py3' job and runs Nova and Neutron related tests.
-      This is meant to be run on the Placement gate only.
- vars:
- tox_envlist: integrated-placement
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- c-bak: false
-
-- job:
- name: tempest-integrated-storage
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
-      This job runs integration tests for image & block storage. This is a
-      subset of the 'tempest-full-py3' job and runs Cinder, Glance, Swift
-      and Nova related tests. This is meant to be run on the Cinder and
-      Glance gates only.
- vars:
- tox_envlist: integrated-storage
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- GLANCE_USE_IMPORT_WORKFLOW: True
-
-- job:
- name: tempest-integrated-object-storage
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
-      This job runs integration tests for object storage. This is a subset
-      of the 'tempest-full-py3' job and runs Swift, Cinder and Glance
-      related tests. This is meant to be run on the Swift gate only.
- vars:
- tox_envlist: integrated-object-storage
- devstack_localrc:
- # NOTE(gmann): swift is not ready on python3 yet and devstack
-        # installs it on python2.7 only. But we set USE_PYTHON3 here
-        # for the future, once swift is ready on py3.
- USE_PYTHON3: true
-
-- job:
- name: tempest-full-py3-ipv6
- parent: devstack-tempest-ipv6
- branches: ^(?!stable/ocata).*$
- description: |
- Base integration test with Neutron networking, IPv6 and py3.
- vars:
- tox_envlist: full
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- # without Swift, c-bak cannot run (in the Gate at least)
- c-bak: false
-
-- job:
- name: tempest-multinode-full-base
- parent: devstack-tempest
- description: |
- Base multinode integration test with Neutron networking and py27.
- Former names for this job were:
- * neutron-tempest-multinode-full
- * legacy-tempest-dsvm-neutron-multinode-full
- * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
- This job includes two nodes, controller / tempest plus a subnode, but
- it can be used with different topologies, as long as a controller node
- and a tempest one exist.
- timeout: 10800
- vars:
- tox_envlist: full
- devstack_localrc:
- FORCE_CONFIG_DRIVE: false
- NOVA_ALLOW_MOVE_TO_SAME_HOST: false
- LIVE_MIGRATION_AVAILABLE: true
- USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
- group-vars:
- peers:
- devstack_localrc:
- NOVA_ALLOW_MOVE_TO_SAME_HOST: false
- LIVE_MIGRATION_AVAILABLE: true
- USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-
-- job:
- name: tempest-multinode-full
- parent: tempest-multinode-full-base
- nodeset: openstack-two-node-focal
- # This job runs on Focal from stable/victoria on.
- branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
- vars:
- devstack_localrc:
- USE_PYTHON3: False
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: False
-
-- job:
- name: tempest-multinode-full
- parent: tempest-multinode-full-base
- nodeset: openstack-two-node-bionic
-  # This job runs on Bionic and on python2. This is for stable/stein,
-  # stable/train and stable/ussuri. This job is prepared to make sure
-  # all stable branches from stable/stein till stable/ussuri will keep
-  # running on bionic. This can be removed once stable/ussuri is EOL.
- branches:
- - stable/stein
- - stable/train
- - stable/ussuri
- vars:
- devstack_localrc:
- USE_PYTHON3: False
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: False
-
-- job:
- name: tempest-multinode-full
- parent: tempest-multinode-full-base
- nodeset: openstack-two-node-xenial
- # This job runs on Xenial and this is for stable/pike, stable/queens
- # and stable/rocky. This job is prepared to make sure all stable branches
- # before stable/stein will keep running on xenial. This job can be
- # removed once stable/rocky is EOL.
- branches:
- - stable/pike
- - stable/queens
- - stable/rocky
- vars:
- devstack_localrc:
- USE_PYTHON3: False
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: False
-
-- job:
- name: tempest-multinode-full-py3
- parent: tempest-multinode-full
- vars:
- devstack_localrc:
- USE_PYTHON3: true
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: true
-
-- job:
- name: tempest-full-py3-opensuse15
- parent: tempest-full-py3
- nodeset: devstack-single-node-opensuse-15
- description: |
- Base integration test with Neutron networking and py36 running
- on openSUSE Leap 15.x
- voting: false
-
-- job:
- name: tempest-slow
- parent: tempest-multinode-full
- description: |
- This multinode integration job will run all the tests tagged as slow.
-    It enables the lvm multibackend setup to cover a few scenario tests.
- This job will run only slow tests (API or Scenario) serially.
-
- Former names for this job were:
- * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
- * tempest-scenario-multinode-lvm-multibackend
- timeout: 10800
- vars:
- tox_envlist: slow-serial
- devstack_localrc:
- CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
- ENABLE_VOLUME_MULTIATTACH: true
- devstack_plugins:
- neutron: https://opendev.org/openstack/neutron
- devstack_services:
- neutron-placement: true
- neutron-qos: true
- devstack_local_conf:
- post-config:
- "/$NEUTRON_CORE_PLUGIN_CONF":
- ovs:
- bridge_mappings: public:br-ex
- resource_provider_bandwidths: br-ex:1000000:1000000
- test-config:
- $TEMPEST_CONFIG:
- network-feature-enabled:
- qos_placement_physnet: public
- tempest_concurrency: 2
- group-vars:
- # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
- # the controller and subnode prior to Rocky so we have to make sure the
- # variable is set in both locations.
- subnode:
- devstack_localrc:
- ENABLE_VOLUME_MULTIATTACH: true
-
-- job:
- name: tempest-slow-py3
- parent: tempest-slow
- vars:
- devstack_localrc:
- USE_PYTHON3: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- # without Swift, c-bak cannot run (in the Gate at least)
- c-bak: false
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: true
-
-- job:
- name: tempest-full-victoria-py3
- parent: tempest-full-py3
- override-checkout: stable/victoria
-
-- job:
- name: tempest-full-ussuri-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-bionic
- override-checkout: stable/ussuri
-
-- job:
- name: tempest-full-train-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-bionic
- override-checkout: stable/train
-
-- job:
- name: tempest-full-stein-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-bionic
- override-checkout: stable/stein
-
-- job:
- name: tempest-tox-plugin-sanity-check
- parent: tox
- description: |
- Run tempest plugin sanity check script using tox.
- nodeset: ubuntu-focal
- vars:
- tox_envlist: plugin-sanity-check
- timeout: 5000
-
-- job:
- name: tempest-cinder-v2-api
- parent: devstack-tempest
- branches:
- - master
- description: |
-    This job runs the cinder API tests against the v2 endpoint.
- vars:
- tox_envlist: all
- tempest_test_regex: api.*volume
- devstack_localrc:
- TEMPEST_VOLUME_TYPE: volumev2
-
-- job:
- name: tempest-full-test-account-py3
- parent: tempest-full-py3
- description: |
- This job runs the full set of tempest tests using pre-provisioned
- credentials instead of dynamic credentials and py3.
- Former names for this job were:
- - legacy-tempest-dsvm-full-test-accounts
- - legacy-tempest-dsvm-neutron-full-test-accounts
- - legacy-tempest-dsvm-identity-v3-test-accounts
- vars:
- devstack_localrc:
- TEMPEST_USE_TEST_ACCOUNTS: True
-
-- job:
- name: tempest-full-test-account-no-admin-py3
- parent: tempest-full-test-account-py3
- description: |
- This job runs the full set of tempest tests using pre-provisioned
- credentials and py3 without having an admin account.
- Former name for this job was:
- - legacy-tempest-dsvm-neutron-full-non-admin
-
- vars:
- devstack_localrc:
- TEMPEST_HAS_ADMIN: False
-
-- job:
- name: tempest-pg-full
- parent: tempest-full-py3
- description: |
- Base integration test with Neutron networking and PostgreSQL.
- Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
- vars:
- devstack_localrc:
- ENABLE_FILE_INJECTION: true
- DATABASE_TYPE: postgresql
-
-- project-template:
- name: integrated-gate-networking
- description: |
- Run the python3 Tempest network integration tests (Nova and Neutron related)
- in check and gate for the neutron integrated gate. This is meant to be
- run on neutron gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-networking
- gate:
- jobs:
- - grenade
- - tempest-integrated-networking
-
-- project-template:
- name: integrated-gate-compute
- description: |
- Run the python3 Tempest compute integration tests
- (Nova, Neutron, Cinder and Glance related) in check and gate
- for the Nova integrated gate. This is meant to be
- run on Nova gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-compute
- gate:
- jobs:
- - grenade
- - tempest-integrated-compute
-
-- project-template:
- name: integrated-gate-placement
- description: |
- Run the python3 Tempest placement integration tests
- (Nova and Neutron related) in check and gate
- for the Placement integrated gate. This is meant to be
- run on Placement gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-placement
- gate:
- jobs:
- - grenade
- - tempest-integrated-placement
-
-- project-template:
- name: integrated-gate-storage
- description: |
- Run the python3 Tempest image & block storage integration tests
- (Cinder, Glance, Swift and Nova related) in check and gate
-    for the storage integrated gate. This is meant to be
- run on Cinder and Glance gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-storage
- gate:
- jobs:
- - grenade
- - tempest-integrated-storage
-
-- project-template:
- name: integrated-gate-object-storage
- description: |
- Run the python3 Tempest object storage integration tests
- (Swift, Cinder and Glance related) in check and gate
- for the swift integrated gate. This is meant to be
- run on swift gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-object-storage
- gate:
- jobs:
- - grenade
- - tempest-integrated-object-storage
-
-- project:
- templates:
- - check-requirements
- - integrated-gate-py3
- - openstack-cover-jobs
- - openstack-python3-victoria-jobs
- - publish-openstack-docs-pti
- - release-notes-jobs-python3
- check:
- jobs:
- - devstack-tempest:
- files:
- - ^playbooks/
- - ^roles/
- - ^.zuul.yaml$
- - devstack-tempest-ipv6:
- voting: false
- files:
- - ^playbooks/
- - ^roles/
- - ^.zuul.yaml$
- - tempest-full-parallel:
- # Define list of irrelevant files to use everywhere else
- irrelevant-files: &tempest-irrelevant-files
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^tools/.*$
- - ^.coveragerc$
- - ^.gitignore$
- - ^.gitreview$
- - ^.mailmap$
- - tempest-full-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3-ipv6:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - glance-multistore-cinder-import:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-victoria-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-ussuri-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-train-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-stein-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-multinode-full-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-tox-plugin-sanity-check:
- irrelevant-files: &tempest-irrelevant-files-2
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^.coveragerc$
- - ^.gitignore$
- - ^.gitreview$
- - ^.mailmap$
- # tools/ is not here since this relies on a script in tools/.
- - tempest-ipv6-only:
- irrelevant-files: *tempest-irrelevant-files-2
- - tempest-slow-py3:
- irrelevant-files: *tempest-irrelevant-files
- - nova-live-migration:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - devstack-plugin-ceph-tempest-py3:
- irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade-multinode:
- irrelevant-files: *tempest-irrelevant-files
- - grenade:
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario001-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario002-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario003-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario004-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - neutron-tempest-dvr:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - interop-tempest-consistency:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-test-account-py3:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-test-account-no-admin-py3:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - openstack-tox-bashate:
- irrelevant-files: *tempest-irrelevant-files-2
- gate:
- jobs:
- - tempest-slow-py3:
- irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade-multinode:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3:
- irrelevant-files: *tempest-irrelevant-files
- - grenade:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-ipv6-only:
- irrelevant-files: *tempest-irrelevant-files-2
- - devstack-plugin-ceph-tempest-py3:
- irrelevant-files: *tempest-irrelevant-files
- experimental:
- jobs:
- - tempest-cinder-v2-api:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-all:
- irrelevant-files: *tempest-irrelevant-files
- - neutron-tempest-dvr-ha-multinode-full:
- irrelevant-files: *tempest-irrelevant-files
- - nova-tempest-v2-api:
- irrelevant-files: *tempest-irrelevant-files
- - cinder-tempest-lvm-multibackend:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-pg-full:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3-opensuse15:
- irrelevant-files: *tempest-irrelevant-files
- periodic-stable:
- jobs:
- - tempest-full-victoria-py3
- - tempest-full-ussuri-py3
- - tempest-full-train-py3
- - tempest-full-stein-py3
- periodic:
- jobs:
- - tempest-all
- - tempest-full-oslo-master
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index c7004dd..c1981f9 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -126,16 +126,16 @@
.. code-block:: python
- class BaseTestCase1(api_version_utils.BaseMicroversionTest):
+ class BaseTestCase1(api_version_utils.BaseMicroversionTest):
- [..]
- @classmethod
- def skip_checks(cls):
- super(BaseTestCase1, cls).skip_checks()
- api_version_utils.check_skip_with_microversion(cls.min_microversion,
- cls.max_microversion,
- CONF.compute.min_microversion,
- CONF.compute.max_microversion)
+ [..]
+ @classmethod
+ def skip_checks(cls):
+ super(BaseTestCase1, cls).skip_checks()
+ api_version_utils.check_skip_with_microversion(cls.min_microversion,
+ cls.max_microversion,
+ CONF.compute.min_microversion,
+ CONF.compute.max_microversion)
 Skip logic can be added in the tests base class or in any specific test
 class, depending on the test class structure.
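
For context, a minimal subclass shows how such a base class is typically
consumed: a concrete test class pins its microversion window via the
``min_microversion``/``max_microversion`` attributes that the skip check above
compares against the configured ``[compute]`` range. The class name and version
values below are illustrative, not part of this change.

.. code-block:: python

    class ShowServerV270Test(BaseTestCase1):
        # Run only when the configured microversion range
        # includes 2.70 (up to the latest supported version).
        min_microversion = '2.70'
        max_microversion = 'latest'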
diff --git a/doc/source/plugins/plugin.rst b/doc/source/plugins/plugin.rst
index ab1b0b1..6726def 100644
--- a/doc/source/plugins/plugin.rst
+++ b/doc/source/plugins/plugin.rst
@@ -268,12 +268,12 @@
class MyAPIClient(rest_client.RestClient):
- def __init__(self, auth_provider, service, region,
- my_arg, my_arg2=True, **kwargs):
- super(MyAPIClient, self).__init__(
- auth_provider, service, region, **kwargs)
- self.my_arg = my_arg
- self.my_args2 = my_arg
+ def __init__(self, auth_provider, service, region,
+ my_arg, my_arg2=True, **kwargs):
+ super(MyAPIClient, self).__init__(
+ auth_provider, service, region, **kwargs)
+ self.my_arg = my_arg
+ self.my_args2 = my_arg
Finally the service client should be structured in a python module, so that all
service client classes are importable from it. Each major API version should
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 0a29b7b..34df089 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -76,54 +76,54 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def skip_checks(cls):
- """This section is used to evaluate config early and skip all test
- methods based on these checks
- """
- super(TestExampleCase, cls).skip_checks()
- if not CONF.section.foo
- cls.skip('A helpful message')
+ @classmethod
+ def skip_checks(cls):
+ """This section is used to evaluate config early and skip all test
+ methods based on these checks
+ """
+ super(TestExampleCase, cls).skip_checks()
+          if not CONF.section.foo:
+              raise cls.skipException('A helpful message')
- @classmethod
- def setup_credentials(cls):
- """This section is used to do any manual credential allocation and also
- in the case of dynamic credentials to override the default network
- resource creation/auto allocation
- """
- # This call is used to tell the credential allocator to not create any
- # network resources for this test case. It also enables selective
- # creation of other neutron resources. NOTE: it must go before the
- # super call
- cls.set_network_resources()
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ """This section is used to do any manual credential allocation and also
+ in the case of dynamic credentials to override the default network
+ resource creation/auto allocation
+ """
+ # This call is used to tell the credential allocator to not create any
+ # network resources for this test case. It also enables selective
+ # creation of other neutron resources. NOTE: it must go before the
+ # super call
+ cls.set_network_resources()
+ super(TestExampleCase, cls).setup_credentials()
- @classmethod
- def setup_clients(cls):
- """This section is used to setup client aliases from the manager object
- or to initialize any additional clients. Except in a few very
- specific situations you should not need to use this.
- """
- super(TestExampleCase, cls).setup_clients()
- cls.servers_client = cls.os_primary.servers_client
+ @classmethod
+ def setup_clients(cls):
+ """This section is used to setup client aliases from the manager object
+ or to initialize any additional clients. Except in a few very
+ specific situations you should not need to use this.
+ """
+ super(TestExampleCase, cls).setup_clients()
+ cls.servers_client = cls.os_primary.servers_client
- @classmethod
- def resource_setup(cls):
- """This section is used to create any resources or objects which are
- going to be used and shared by **all** test methods in the
- TestCase. Note then anything created in this section must also be
- destroyed in the corresponding resource_cleanup() method (which will
- be run during tearDownClass())
- """
- super(TestExampleCase, cls).resource_setup()
- cls.shared_server = cls.servers_client.create_server(...)
- cls.addClassResourceCleanup(waiters.wait_for_server_termination,
- cls.servers_client,
- cls.shared_server['id'])
- cls.addClassResourceCleanup(
- test_utils.call_and_ignore_notfound_exc(
- cls.servers_client.delete_server,
- cls.shared_server['id']))
+ @classmethod
+ def resource_setup(cls):
+ """This section is used to create any resources or objects which are
+ going to be used and shared by **all** test methods in the
+          TestCase. Note that anything created in this section must also be
+ destroyed in the corresponding resource_cleanup() method (which will
+ be run during tearDownClass())
+ """
+ super(TestExampleCase, cls).resource_setup()
+ cls.shared_server = cls.servers_client.create_server(...)
+ cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+ cls.servers_client,
+ cls.shared_server['id'])
+ cls.addClassResourceCleanup(
+          cls.addClassResourceCleanup(
+              test_utils.call_and_ignore_notfound_exc,
+              cls.servers_client.delete_server,
+              cls.shared_server['id'])
.. _credentials:
@@ -150,9 +150,9 @@
credentials = ['primary', 'admin']
- @classmethod
- def skip_checks(cls):
- ...
+ @classmethod
+ def skip_checks(cls):
+ ...
In this example the ``TestExampleAdmin`` TestCase will allocate 2 sets of
credentials, one regular user and one admin user. The corresponding manager
@@ -225,10 +225,10 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources(network=True, subnet=True, router=False)
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(network=True, subnet=True, router=False)
+ super(TestExampleCase, cls).setup_credentials()
There are 2 quirks with the usage here. First for the set_network_resources
function to work properly it **must be called before super()**. This is so
@@ -242,10 +242,10 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources()
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources()
+ super(TestExampleCase, cls).setup_credentials()
 This will not allocate any networking resources, because all the
 arguments default to False.
@@ -282,8 +282,8 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- self.os_primary.servers_client.create_server(...)
+ def test_example_create_server(self):
+ self.os_primary.servers_client.create_server(...)
is all you need to do. As described previously, in the above example the
``self.os_primary`` is created automatically because the base test class sets the
@@ -305,8 +305,8 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- credentials = self.os_primary.credentials
+ def test_example_create_server(self):
+ credentials = self.os_primary.credentials
The credentials object provides access to all of the credential information you
would need to make API requests. For example, building off the previous
@@ -316,9 +316,9 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- credentials = self.os_primary.credentials
- username = credentials.username
- user_id = credentials.user_id
- password = credentials.password
- tenant_id = credentials.tenant_id
+ def test_example_create_server(self):
+ credentials = self.os_primary.credentials
+ username = credentials.username
+ user_id = credentials.user_id
+ password = credentials.password
+ tenant_id = credentials.tenant_id
diff --git a/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
new file mode 100644
index 0000000..8e42e85
--- /dev/null
+++ b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+    Added associate_floating_ip() and disassociate_floating_ip() methods
+ to the scenario manager.
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 9340997..342380e 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -112,7 +112,5 @@
server['id'], attachment['volumeId'])
waiters.wait_for_volume_resource_status(
self.volumes_client, attachment['volumeId'], 'available')
- volume_after_detach = self.servers_client.list_volume_attachments(
- server['id'])['volumeAttachments']
- self.assertEqual(0, len(volume_after_detach),
- "Failed to detach volume")
+ waiters.wait_for_volume_attachment_remove_from_server(
+ self.servers_client, server['id'], attachment['volumeId'])
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 0601bbe..102792e 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -427,3 +427,33 @@
CONF.compute.build_interval, original_ip_count):
raise lib_exc.TimeoutException(
'Timed out while waiting for IP count to decrease.')
+
+
+class AttachInterfacesV270Test(AttachInterfacesTestBase):
+ """Test interface API with microversion greater than 2.69"""
+ min_microversion = '2.70'
+
+ @decorators.idempotent_id('2853f095-8277-4067-92bd-9f10bd4f8e0c')
+ @utils.services('network')
+ def test_create_get_list_interfaces(self):
+ """Test interface API with microversion greater than 2.69
+
+ Checking create, get, list interface APIs response schema.
+ """
+ server = self.create_test_server(wait_until='ACTIVE')
+ try:
+ iface = self.interfaces_client.create_interface(server['id'])[
+ 'interfaceAttachment']
+ iface = waiters.wait_for_interface_status(
+ self.interfaces_client, server['id'], iface['port_id'],
+ 'ACTIVE')
+ except lib_exc.BadRequest as e:
+ msg = ('Multiple possible networks found, use a Network ID to be '
+ 'more specific.')
+ if not CONF.compute.fixed_network_name and six.text_type(e) == msg:
+ raise
+ else:
+ # just to check the response schema
+ self.interfaces_client.show_interface(
+ server['id'], iface['port_id'])
+ self.interfaces_client.list_interfaces(server['id'])
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 28299a4..9e25901 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -402,7 +402,8 @@
# Validate that the list was fetched sorted accordingly
msg = 'No images were found that met the filter criteria.'
self.assertNotEmpty(images_list, msg)
- sorted_list = [image['size'] for image in images_list]
+ sorted_list = [image['size'] for image in images_list
+ if image['size'] is not None]
msg = 'The list of images was not sorted correctly.'
self.assertEqual(sorted(sorted_list, reverse=desc), sorted_list, msg)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 625e08e..e3c33c7 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -317,6 +317,32 @@
'seconds', attachment_id, volume_id, time.time() - start)
+def wait_for_volume_attachment_remove_from_server(
+ client, server_id, volume_id):
+ """Waits for a volume to be removed from a given server.
+
+    This waiter polls the compute API until the volume attachment is removed.
+ """
+ start = int(time.time())
+ volumes = client.list_volume_attachments(server_id)['volumeAttachments']
+
+ while any(volume for volume in volumes if volume['volumeId'] == volume_id):
+ time.sleep(client.build_interval)
+
+ timed_out = int(time.time()) - start >= client.build_timeout
+ if timed_out:
+ message = ('Volume %s failed to detach from server %s within '
+ 'the required time (%s s) from the compute API '
+ 'perspective' %
+ (volume_id, server_id, client.build_timeout))
+ raise lib_exc.TimeoutException(message)
+
+ volumes = client.list_volume_attachments(server_id)[
+ 'volumeAttachments']
+
+ return volumes
+
+
def wait_for_volume_migration(client, volume_id, new_host):
"""Waits for a Volume to move to a new host."""
body = client.show_volume(volume_id)['volume']
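
As a usage illustration (an assumed test-class context, not part of this
patch), the new waiter complements the cinder-side status waiter after a
detach call, mirroring the test_volume.py change above; ``self.servers_client``
and ``self.volumes_client`` are the usual tempest compute/volume clients.

.. code-block:: python

    # Detach and wait for cinder to report the volume as available ...
    self.servers_client.detach_volume(server['id'], volume['id'])
    waiters.wait_for_volume_resource_status(
        self.volumes_client, volume['id'], 'available')
    # ... then confirm the compute API no longer lists the attachment.
    waiters.wait_for_volume_attachment_remove_from_server(
        self.servers_client, server['id'], volume['id'])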
diff --git a/tempest/config.py b/tempest/config.py
index 3761d8e..382b80f 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -761,11 +761,13 @@
deprecated_reason="This config option is no longer "
"used anywhere, so it can be removed."),
cfg.StrOpt('port_vnic_type',
- choices=[None, 'normal', 'direct', 'macvtap'],
+ choices=[None, 'normal', 'direct', 'macvtap', 'direct-physical',
+ 'baremetal', 'virtio-forwarder'],
help="vnic_type to use when launching instances"
" with pre-configured ports."
" Supported ports are:"
- " ['normal','direct','macvtap']"),
+ " ['normal', 'direct', 'macvtap', 'direct-physical', "
+ "'baremetal', 'virtio-forwarder']"),
cfg.Opt('port_profile',
type=ProfileType,
default={},
@@ -787,36 +789,37 @@
NetworkFeaturesGroup = [
cfg.BoolOpt('ipv6',
default=True,
- help="Allow the execution of IPv6 tests"),
+ help="Allow the execution of IPv6 tests."),
cfg.ListOpt('api_extensions',
default=['all'],
help="A list of enabled network extensions with a special "
"entry all which indicates every extension is enabled. "
"Empty list indicates all extensions are disabled. "
- "To get the list of extensions run: 'neutron ext-list'"),
+ "To get the list of extensions run: "
+ "'openstack extension list --network'"),
cfg.ListOpt('available_features',
default=['all'],
help="A list of available network features with a special "
"entry all that indicates every feature is available. "
- "Empty list indicates all features are disabled."
+ "Empty list indicates all features are disabled. "
"This list can contain features that are not "
- "discoverable through API."),
+ "discoverable through the API."),
cfg.BoolOpt('ipv6_subnet_attributes',
default=False,
help="Allow the execution of IPv6 subnet tests that use "
"the extended IPv6 attributes ipv6_ra_mode "
- "and ipv6_address_mode"
+ "and ipv6_address_mode."
),
cfg.BoolOpt('port_admin_state_change',
default=True,
- help="Does the test environment support changing"
- " port admin state"),
+ help="Does the test environment support changing "
+ "port admin state?"),
cfg.BoolOpt('port_security',
default=False,
help="Does the test environment support port security?"),
cfg.BoolOpt('floating_ips',
default=True,
- help='Does the test environment support floating_ips'),
+ help='Does the test environment support floating_ips?'),
cfg.StrOpt('qos_placement_physnet', default=None,
help='Name of the physnet for placement based minimum '
'bandwidth allocation.'),
diff --git a/tempest/lib/api_schema/response/compute/v2_70/interfaces.py b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
new file mode 100644
index 0000000..3160b92
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import interfaces
+
+# ****** Schemas changed in microversion 2.70 *****************
+#
+# 1. add optional field 'tag' in the Response body of the following APIs:
+# - GET /servers/{server_id}/os-interface
+# - POST /servers/{server_id}/os-interface
+# - GET /servers/{server_id}/os-interface/{port_id}
+
+get_create_interfaces = copy.deepcopy(interfaces.get_create_interfaces)
+get_create_interfaces['response_body']['properties']['interfaceAttachment'][
+ 'properties'].update({'tag': {'type': ['string', 'null']}})
+
+list_interfaces = copy.deepcopy(interfaces.list_interfaces)
+list_interfaces['response_body']['properties']['interfaceAttachments'][
+ 'items']['properties'].update({'tag': {'type': ['string', 'null']}})
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.1 ***
+delete_interface = copy.deepcopy(interfaces.delete_interface)
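
The copy-and-extend pattern above can be seen in isolation with a generic,
self-contained sketch (plain dicts standing in for the real schema modules):
the base schema stays untouched while the 2.70 copy gains the optional
``tag`` property.

.. code-block:: python

    import copy

    base = {'response_body': {'properties': {'interfaceAttachment': {
        'properties': {'port_id': {'type': 'string'}}}}}}

    v270 = copy.deepcopy(base)
    v270['response_body']['properties']['interfaceAttachment'][
        'properties'].update({'tag': {'type': ['string', 'null']}})

    # The v2.1 schema is unchanged; only the 2.70 copy knows about 'tag'.
    assert 'tag' not in base['response_body']['properties'][
        'interfaceAttachment']['properties']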
diff --git a/tempest/lib/services/compute/interfaces_client.py b/tempest/lib/services/compute/interfaces_client.py
index e1c02fa..9244a4a 100644
--- a/tempest/lib/services/compute/interfaces_client.py
+++ b/tempest/lib/services/compute/interfaces_client.py
@@ -16,15 +16,22 @@
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import interfaces as schema
+from tempest.lib.api_schema.response.compute.v2_70 import interfaces as \
+ schemav270
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class InterfacesClient(base_compute_client.BaseComputeClient):
+ schema_versions_info = [
+ {'min': None, 'max': '2.69', 'schema': schema},
+ {'min': '2.70', 'max': None, 'schema': schemav270}]
+
def list_interfaces(self, server_id):
resp, body = self.get('servers/%s/os-interface' % server_id)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -40,6 +47,7 @@
resp, body = self.post('servers/%s/os-interface' % server_id,
body=post_body)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_create_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -47,6 +55,7 @@
resp, body = self.get('servers/%s/os-interface/%s' % (server_id,
port_id))
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_create_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
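
The schema lookup works roughly as sketched below, a simplified stand-in for
``BaseComputeClient.get_schema`` (the real implementation lives in tempest's
base compute client; the naive ``'major.minor'`` parsing here is an assumption
for illustration): the first entry whose ``min``/``max`` window contains the
microversion in use wins, with ``None`` bounds treated as open-ended.

.. code-block:: python

    def pick_schema(schema_versions_info, microversion):
        """Return the schema whose [min, max] window contains microversion."""
        def key(version):
            major, minor = version.split('.')
            return (int(major), int(minor))

        for entry in schema_versions_info:
            low_ok = (entry['min'] is None or
                      key(microversion) >= key(entry['min']))
            high_ok = (entry['max'] is None or
                       key(microversion) <= key(entry['max']))
            if low_ok and high_ok:
                return entry['schema']
        raise ValueError('No schema found for microversion %s' % microversion)

    versions = [{'min': None, 'max': '2.69', 'schema': 'v2_1'},
                {'min': '2.70', 'max': None, 'schema': 'v2_70'}]
    assert pick_schema(versions, '2.69') == 'v2_1'
    assert pick_schema(versions, '2.70') == 'v2_70'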
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 3c37b70..2a41d13 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -143,10 +143,10 @@
# resp part which is not used in scenario tests
def create_port(self, network_id, client=None, **kwargs):
- """Creates port"""
+ """Creates port for the respective network_id"""
if not client:
client = self.ports_client
- name = data_utils.rand_name(self.__class__.__name__)
+ name = kwargs.pop('namestart', self.__class__.__name__)
if CONF.network.port_vnic_type and 'binding:vnic_type' not in kwargs:
kwargs['binding:vnic_type'] = CONF.network.port_vnic_type
if CONF.network.port_profile and 'binding:profile' not in kwargs:
@@ -155,6 +155,7 @@
name=name,
network_id=network_id,
**kwargs)
+ self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
@@ -200,6 +201,14 @@
direct: an SR-IOV port that is directly attached to a VM
macvtap: an SR-IOV port that is attached to a VM via a macvtap
device.
+ direct-physical: an SR-IOV port that is directly attached to a
+ VM using physical instead of virtual
+ functions.
+ baremetal: a baremetal port directly attached to a baremetal
+ node.
+ virtio-forwarder: an SR-IOV port that is indirectly attached
+ to a VM using a low-latency vhost-user
+ forwarding process.
Defaults to ``CONF.network.port_vnic_type``.
* *port_profile* (``dict``) --
This attribute is a dictionary that can be used (with admin
@@ -455,7 +464,8 @@
admin_volumes_client.wait_for_resource_deletion(volume['id'])
admin_volume_type_client.delete_volume_type(volume_type['id'])
- def create_volume_type(self, client=None, name=None, backend_name=None):
+ def create_volume_type(self, client=None, name=None, backend_name=None,
+ **kwargs):
"""Creates volume type
In a multiple-storage back-end configuration,
@@ -482,12 +492,14 @@
LOG.debug("Creating a volume type: %s on backend %s",
randomized_name, backend_name)
- extra_specs = {}
+ extra_specs = kwargs.pop("extra_specs", {})
if backend_name:
- extra_specs = {"volume_backend_name": backend_name}
+ extra_specs.update({"volume_backend_name": backend_name})
- volume_type = client.create_volume_type(
- name=randomized_name, extra_specs=extra_specs)['volume_type']
+ volume_type_resp = client.create_volume_type(
+ name=randomized_name, extra_specs=extra_specs, **kwargs)
+ volume_type = volume_type_resp['volume_type']
+
self.assertIn('id', volume_type)
self.addCleanup(self._cleanup_volume_type, volume_type)
return volume_type
@@ -574,7 +586,7 @@
linux_client.validate_authentication()
return linux_client
- def image_create(self, name='scenario-img'):
+ def image_create(self, name='scenario-img', **kwargs):
img_path = CONF.scenario.img_file
if not os.path.exists(img_path):
             # TODO(kopecmartin): replace LOG.warning with raising
@@ -614,6 +626,7 @@
# Additional properties are flattened out in the v2 API.
if img_properties:
params.update(img_properties)
+ params.update(kwargs)
body = self.image_client.create_image(**params)
image = body['image'] if 'image' in body else body
self.addCleanup(self.image_client.delete_image, image['id'])
@@ -650,7 +663,7 @@
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
- def create_server_snapshot(self, server, name=None):
+ def create_server_snapshot(self, server, name=None, **kwargs):
"""Creates server snapshot"""
# Glance client
_image_client = self.image_client
@@ -659,7 +672,7 @@
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
LOG.debug("Creating a snapshot image for server: %s", server['name'])
- image = _images_client.create_image(server['id'], name=name)
+ image = _images_client.create_image(server['id'], name=name, **kwargs)
image_id = image.response['location'].split('images/')[1]
waiters.wait_for_image_status(_image_client, image_id, 'active')
@@ -856,15 +869,25 @@
return timestamp
def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
- private_key=None, server=None):
+ private_key=None, server=None, username=None):
"""Returns timestamp
This wrapper utility does ssh and returns the timestamp.
+
+ :param ip_address: The floating IP or fixed IP of the remote server
+ :param dev_name: Name of the device that stores the timestamp
+ :param mount_path: Path which should be used as mount point for
+ dev_name
+ :param private_key: The SSH private key to use for authentication
+ :param server: Server dict, used for debugging purposes
+ :param username: Name of the Linux account on the remote server
"""
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
- server=server)
+ server=server,
+ username=username)
+
if dev_name is not None:
ssh_client.mount(dev_name, mount_path)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
@@ -951,12 +974,21 @@
return self.create_server(**create_kwargs)
- def create_volume_from_image(self):
- """Create volume from image"""
- img_uuid = CONF.compute.image_ref
- vol_name = data_utils.rand_name(
- self.__class__.__name__ + '-volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
+ def create_volume_from_image(self, **kwargs):
+ """Create volume from image.
+
+ :param image_id: ID of the image to create volume from,
+ CONF.compute.image_ref by default
+ :param name: name of the volume,
+ '$classname-volume-origin' by default
+ :param **kwargs: additional parameters
+ """
+ image_id = kwargs.pop('image_id', CONF.compute.image_ref)
+ name = kwargs.pop('name', None)
+ if not name:
+ namestart = self.__class__.__name__ + '-volume-origin'
+ name = data_utils.rand_name(namestart)
+ return self.create_volume(name=name, imageRef=image_id, **kwargs)
class NetworkScenarioTest(ScenarioTest):
@@ -1151,6 +1183,32 @@
floating_ip['id'])
return floating_ip
+ def associate_floating_ip(self, floating_ip, server):
+ """Associate floating ip
+
+        This wrapper utility attaches the given floating_ip to a
+        port of the given server.
+ """
+ port_id, _ = self._get_server_port_id_and_ip4(server)
+ kwargs = dict(port_id=port_id)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertEqual(port_id, floating_ip['port_id'])
+ return floating_ip
+
+ def disassociate_floating_ip(self, floating_ip):
+ """Disassociates floating ip
+
+        This wrapper utility disassociates the given floating ip.
+ :param floating_ip: a dict which is a return value of
+ floating_ips_client.create_floatingip method
+ """
+ kwargs = dict(port_id=None)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertIsNone(floating_ip['port_id'])
+ return floating_ip
+
def check_floating_ip_status(self, floating_ip, status):
"""Verifies floatingip reaches the given status
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
index 74d4ed9..86639c5 100644
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
+import testtools
from tempest.common import utils
from tempest.common import waiters
@@ -23,7 +23,6 @@
from tempest.scenario import manager
-LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -65,6 +64,8 @@
cls.routers_client = cls.os_adm.routers_client
cls.qos_client = cls.os_admin.qos_client
cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+ cls.flavors_client = cls.os_adm.flavors_client
+ cls.servers_client = cls.os_adm.servers_client
@classmethod
def skip_checks(cls):
@@ -74,6 +75,11 @@
"placement based QoS allocation."
raise cls.skipException(msg)
+ def setUp(self):
+ super(MinBwAllocationPlacementTest, self).setUp()
+ self._check_if_allocation_is_possible()
+ self._create_network_and_qos_policies()
+
def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
policy = self.qos_client.create_qos_policy(
name=data_utils.rand_name(name_prefix),
@@ -139,6 +145,33 @@
self.fail('For %s:%s there should be no available candidate!' %
(self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+ def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
+ wait_until = (None if status == 'ERROR' else status)
+ port = self.create_port(
+ self.prov_network['id'], qos_policy_id=qos_policy_id)
+
+ server = self.create_server(networks=[{'port': port['id']}],
+ wait_until=wait_until)
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status=status, ready_wait=False, raise_on_error=False)
+ return server, port
+
+ def _assert_allocation_is_as_expected(self, allocations, port_id):
+ self.assertGreater(len(allocations['allocations']), 0)
+ bw_resource_in_alloc = False
+ for rp, resources in allocations['allocations'].items():
+ if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+ bw_resource_in_alloc = True
+ allocation_rp = rp
+ self.assertTrue(bw_resource_in_alloc)
+
+        # Check that the binding_profile of the port is not empty and
+        # equals the rp uuid
+ port = self.os_admin.ports_client.show_port(port_id)
+ self.assertEqual(allocation_rp,
+ port['port']['binding:profile']['allocation'])
+
@decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
@decorators.attr(type='slow')
@utils.services('compute', 'network')
@@ -162,40 +195,13 @@
it should fail.
"""
- self._check_if_allocation_is_possible()
-
- self._create_network_and_qos_policies()
-
- valid_port = self.create_port(
- self.prov_network['id'], qos_policy_id=self.qos_policy_valid['id'])
-
- server1 = self.create_server(
- networks=[{'port': valid_port['id']}])
+ server1, valid_port = self._boot_vm_with_min_bw(
+ qos_policy_id=self.qos_policy_valid['id'])
allocations = self.placement_client.list_allocations(server1['id'])
+ self._assert_allocation_is_as_expected(allocations, valid_port['id'])
- self.assertGreater(len(allocations['allocations']), 0)
- bw_resource_in_alloc = False
- for rp, resources in allocations['allocations'].items():
- if self.INGRESS_RESOURCE_CLASS in resources['resources']:
- bw_resource_in_alloc = True
- allocation_rp = rp
- self.assertTrue(bw_resource_in_alloc)
- # Check that binding_profile of the port is not empty and equals with
- # the rp uuid
- port = self.os_admin.ports_client.show_port(valid_port['id'])
- self.assertEqual(allocation_rp,
- port['port']['binding:profile']['allocation'])
-
- # boot another vm with max int bandwidth
- not_valid_port = self.create_port(
- self.prov_network['id'],
- qos_policy_id=self.qos_policy_not_valid['id'])
- server2 = self.create_server(
- wait_until=None,
- networks=[{'port': not_valid_port['id']}])
- waiters.wait_for_server_status(
- client=self.os_primary.servers_client, server_id=server2['id'],
- status='ERROR', ready_wait=False, raise_on_error=False)
+ server2, not_valid_port = self._boot_vm_with_min_bw(
+ self.qos_policy_not_valid['id'], status='ERROR')
allocations = self.placement_client.list_allocations(server2['id'])
self.assertEqual(0, len(allocations['allocations']))
@@ -205,3 +211,90 @@
# Check that binding_profile of the port is empty
port = self.os_admin.ports_client.show_port(not_valid_port['id'])
self.assertEqual(0, len(port['port']['binding:profile']))
+
+ @decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @utils.services('compute', 'network')
+ def test_migrate_with_qos_min_bw_allocation(self):
+ """Scenario to migrate VM with QoS min bw allocation in placement
+
+ Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+ checks, and
+ * migrate the server
+ * confirm the resize, if the VM state is VERIFY_RESIZE
+ * If the VM goes to ACTIVE state check that allocations are as
+ expected.
+ """
+ server, valid_port = self._boot_vm_with_min_bw(
+ qos_policy_id=self.qos_policy_valid['id'])
+ allocations = self.placement_client.list_allocations(server['id'])
+ self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+ self.servers_client.migrate_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+ allocations = self.placement_client.list_allocations(server['id'])
+
+ # TODO(lajoskatona): Check that the allocations are ok for the
+ # migration?
+ self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+ self.servers_client.confirm_resize_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status='ACTIVE', ready_wait=False, raise_on_error=True)
+ allocations = self.placement_client.list_allocations(server['id'])
+ self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+ @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @utils.services('compute', 'network')
+ def test_resize_with_qos_min_bw_allocation(self):
+ """Scenario to resize VM with QoS min bw allocation in placement.
+
+ Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+ checks, and
+ * resize the server with new flavor
+ * confirm the resize, if the VM state is VERIFY_RESIZE
+ * If the VM goes to ACTIVE state check that allocations are as
+ expected.
+ """
+ server, valid_port = self._boot_vm_with_min_bw(
+ qos_policy_id=self.qos_policy_valid['id'])
+ allocations = self.placement_client.list_allocations(server['id'])
+ self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+ old_flavor = self.flavors_client.show_flavor(
+ CONF.compute.flavor_ref)['flavor']
+ new_flavor = self.flavors_client.create_flavor(**{
+ 'ram': old_flavor['ram'],
+ 'vcpus': old_flavor['vcpus'],
+ 'name': old_flavor['name'] + 'extra',
+ 'disk': old_flavor['disk'] + 1
+ })['flavor']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.flavors_client.delete_flavor, new_flavor['id'])
+
+ self.servers_client.resize_server(
+ server_id=server['id'], flavor_ref=new_flavor['id'])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+ allocations = self.placement_client.list_allocations(server['id'])
+
+ # TODO(lajoskatona): Check that the allocations are ok for the
+ # migration?
+ self._assert_allocation_is_as_expected(allocations, valid_port['id'])
+
+ self.servers_client.confirm_resize_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status='ACTIVE', ready_wait=False, raise_on_error=True)
+ allocations = self.placement_client.list_allocations(server['id'])
+ self._assert_allocation_is_as_expected(allocations, valid_port['id'])
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index f45eec0..ff74877 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -20,6 +20,7 @@
from tempest.common import waiters
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.compute import servers_client
from tempest.lib.services.volume.v2 import volumes_client
from tempest.tests import base
import tempest.tests.utils as utils
@@ -384,3 +385,54 @@
uuids.attachment_id)
# Assert that show volume is only called once before we return
show_volume.assert_called_once_with(uuids.volume_id)
+
+ def test_wait_for_volume_attachment_remove_from_server(self):
+ volume_attached = {
+ "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+ volume_not_attached = {"volumeAttachments": []}
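+ # Simulate the detach completing on the second poll: the first call
+ # still sees the attachment, the second sees it gone.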
+ mock_list_volume_attachments = mock.Mock(
+ side_effect=[volume_attached, volume_not_attached])
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ build_interval=1,
+ build_timeout=1,
+ list_volume_attachments=mock_list_volume_attachments)
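+ # Stub the clock: the waiter starts at 0, polls once within
+ # build_timeout, and would time out on the next check.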
+ self.patch(
+ 'time.time',
+ side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+ self.patch('time.sleep')
+
+ waiters.wait_for_volume_attachment_remove_from_server(
+ mock_client, uuids.server_id, uuids.volume_id)
+
+ # Assert that list_volume_attachments is called until the attachment is
+ # removed.
+ mock_list_volume_attachments.assert_has_calls([
+ mock.call(uuids.server_id),
+ mock.call(uuids.server_id)])
+
+ def test_wait_for_volume_attachment_remove_from_server_timeout(self):
+ volume_attached = {
+ "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+ mock_list_volume_attachments = mock.Mock(
+ side_effect=[volume_attached, volume_attached])
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ build_interval=1,
+ build_timeout=1,
+ list_volume_attachments=mock_list_volume_attachments)
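+ # Stub the clock so the second check happens past build_timeout,
+ # forcing the waiter to raise TimeoutException.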
+ self.patch(
+ 'time.time',
+ side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+ self.patch('time.sleep')
+
+ self.assertRaises(
+ lib_exc.TimeoutException,
+ waiters.wait_for_volume_attachment_remove_from_server,
+ mock_client, uuids.server_id, uuids.volume_id)
+
+ # Assert that list_volume_attachments is called until the wait
+ # times out.
+ mock_list_volume_attachments.assert_has_calls([
+ mock.call(uuids.server_id),
+ mock.call(uuids.server_id)])
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
new file mode 100644
index 0000000..3deb944
--- /dev/null
+++ b/zuul.d/base.yaml
@@ -0,0 +1,86 @@
+- job:
+ name: devstack-tempest
+ parent: devstack
+ description: |
+ Base Tempest job.
+
+ This Tempest job provides the base for both the single and multi-node
+ test setup. To run a multi-node test inherit from devstack-tempest and
+ set the nodeset to a multi-node one.
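+ # The &base_* anchors below are reused (via YAML aliases) by the
+ # devstack-tempest-ipv6 job below so both base jobs stay in sync.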
+ required-projects: &base_required-projects
+ - opendev.org/openstack/tempest
+ timeout: 7200
+ roles: &base_roles
+ - zuul: opendev.org/openstack/devstack
+ vars: &base_vars
+ devstack_services:
+ tempest: true
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute:
+ min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
+ test_results_stage_name: test_results
+ zuul_copy_output:
+ '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
+ '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
+ '{{ devstack_base_dir }}/tempest/tempest.log': logs
+ '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
+ '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
+ '{{ stage_dir }}/stackviz': logs
+ extensions_to_txt:
+ conf: true
+ log: true
+ yaml: true
+ yml: true
+ run: playbooks/devstack-tempest.yaml
+ post-run: playbooks/post-tempest.yaml
+
+- job:
+ name: devstack-tempest-ipv6
+ parent: devstack-ipv6
+ description: |
+ Base Tempest IPv6 job. This job is derived from 'devstack-ipv6',
+ which sets the IPv6-only setting for OpenStack services. As part of
+ the run phase, this job will verify the IPv6 setting and check that
+ the service endpoints and listen addresses are IPv6. Basically it
+ will run the script ./tool/verify-ipv6-only-deployments.sh
+
+ Child jobs of this job can run their own set of tests and can
+ add post-run playbooks to extend the IPv6 verification specific
+ to their deployed services.
+ Check the wiki page for more details about project job setup
+ - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
+ required-projects: *base_required-projects
+ timeout: 7200
+ roles: *base_roles
+ vars: *base_vars
+ run: playbooks/devstack-tempest-ipv6.yaml
+ post-run: playbooks/post-tempest.yaml
+
+- job:
+ name: tempest-multinode-full-base
+ parent: devstack-tempest
+ description: |
+ Base multinode integration test with Neutron networking and py27.
+ Former names for this job were:
+ * neutron-tempest-multinode-full
+ * legacy-tempest-dsvm-neutron-multinode-full
+ * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
+ This job includes two nodes, a controller/tempest node plus a
+ subnode, but it can be used with different topologies, as long as
+ a controller node and a tempest node exist.
+ timeout: 10800
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ FORCE_CONFIG_DRIVE: false
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ LIVE_MIGRATION_AVAILABLE: true
+ USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+ group-vars:
+ peers:
+ devstack_localrc:
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ LIVE_MIGRATION_AVAILABLE: true
+ USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
new file mode 100644
index 0000000..bf689c5
--- /dev/null
+++ b/zuul.d/integrated-gate.yaml
@@ -0,0 +1,441 @@
+# NOTE(gmann): This file includes all integrated job definitions which
+# are supposed to be run by Tempest and other projects as
+# integrated testing.
+- job:
+ name: tempest-all
+ parent: devstack-tempest
+ description: |
+ Integration test that runs all tests.
+ Former name for this job was:
+ * legacy-periodic-tempest-dsvm-all-master
+ vars:
+ tox_envlist: all
+ tempest_test_regex: tempest
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+
+- job:
+ name: tempest-ipv6-only
+ parent: devstack-tempest-ipv6
+ # This currently works from stable/pike on.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Integration test of IPv6-only deployments. This job runs
+ smoke and IPv6-related tests only. The basic idea is to test
+ whether OpenStack services listen on IPv6 addresses or not.
+ timeout: 10800
+ vars:
+ tox_envlist: ipv6-only
+
+- job:
+ name: tempest-full
+ parent: devstack-tempest
+ # This currently works from stable/pike on.
+ # Before stable/pike, legacy version of tempest-full
+ # 'legacy-tempest-dsvm-neutron-full' run.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Base integration test with Neutron networking and py27.
+ This job is supposed to run on stable/train and earlier setups only.
+ If you are running it on the stable/ussuri gate onwards for python2.7
+ coverage, then you need to do an override-checkout with a stable
+ branch less than or equal to stable/train.
+ Former names for this job were:
+ * legacy-tempest-dsvm-neutron-full
+ * gate-tempest-dsvm-neutron-full-ubuntu-xenial
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ USE_PYTHON3: False
+ devstack_services:
+ # NOTE(mriedem): Disable the cinder-backup service from tempest-full
+ # since tempest-full is in the integrated-gate project template but
+ # the backup tests do not really involve other services so they should
+ # be run in some more cinder-specific job, especially because the
+ # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
+ c-bak: false
+
+- job:
+ name: tempest-full-py3
+ parent: devstack-tempest
+ # This currently works from stable/pike on.
+ # Before stable/pike, legacy version of tempest-full
+ # 'legacy-tempest-dsvm-neutron-full' run.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Base integration test with Neutron networking and py3.
+ Former names for this job were:
+ * legacy-tempest-dsvm-py35
+ * gate-tempest-dsvm-py35
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ GLANCE_USE_IMPORT_WORKFLOW: True
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
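+ # br-ex reports 1000000 kbps (roughly 1 Gbit/s) of egress and
+ # ingress bandwidth to placement for the QoS minimum bandwidth
+ # tests.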
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ # NOTE(mriedem): Disable the cinder-backup service from
+ # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+ # project template but the backup tests do not really involve other
+ # services so they should be run in some more cinder-specific job,
+ # especially because the tests fail at a high rate (see bugs 1483434,
+ # 1813217, 1745168)
+ c-bak: false
+ neutron-placement: true
+ neutron-qos: true
+
+- job:
+ name: tempest-integrated-networking
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for networking. This is a subset of
+ the 'tempest-full-py3' job and runs only Neutron and Nova related
+ tests. This is meant to be run on the Neutron gate only.
+ vars:
+ tox_envlist: integrated-network
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-compute
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for compute. This is a subset of the
+ 'tempest-full-py3' job and runs Nova, Neutron, Cinder (except backup
+ tests) and Glance related tests. This is meant to be run on the Nova
+ gate only.
+ vars:
+ tox_envlist: integrated-compute
+ tempest_black_regex: ""
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-placement
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for placement. This is a subset of
+ the 'tempest-full-py3' job and runs Nova and Neutron related tests.
+ This is meant to be run on the Placement gate only.
+ vars:
+ tox_envlist: integrated-placement
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-storage
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for image & block storage. This is a
+ subset of the 'tempest-full-py3' job and runs Cinder, Glance, Swift
+ and Nova related tests. This is meant to be run on the Cinder and
+ Glance gates only.
+ vars:
+ tox_envlist: integrated-storage
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ GLANCE_USE_IMPORT_WORKFLOW: True
+
+- job:
+ name: tempest-integrated-object-storage
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for object storage. This is a subset
+ of the 'tempest-full-py3' job and runs Swift, Cinder and Glance
+ related tests. This is meant to be run on the Swift gate only.
+ vars:
+ tox_envlist: integrated-object-storage
+ devstack_localrc:
+ # NOTE(gmann): swift is not ready on python3 yet and devstack
+ # installs it on python2.7 only, but USE_PYTHON3 is set here for
+ # the future, once swift is ready on py3.
+ USE_PYTHON3: true
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-focal
+ # This job runs on Focal from stable/victoria on.
+ branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-bionic
+ # This job runs on Bionic and on python2. This is for stable/stein,
+ # stable/train and stable/ussuri. This job is prepared to make sure all
+ # stable branches from stable/stein till stable/ussuri keep running on
+ # Bionic. This can be removed once stable/ussuri is EOL.
+ branches:
+ - stable/stein
+ - stable/train
+ - stable/ussuri
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-xenial
+ # This job runs on Xenial and is for stable/pike, stable/queens and
+ # stable/rocky. This job is prepared to make sure all stable branches
+ # before stable/stein keep running on Xenial. This job can be removed
+ # once stable/rocky is EOL.
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-multinode-full-py3
+ parent: tempest-multinode-full
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
+
+- job:
+ name: tempest-slow
+ parent: tempest-multinode-full
+ description: |
+ This multinode integration job will run all the tests tagged as slow.
+ It enables the lvm multibackend setup to cover a few scenario tests.
+ This job runs only slow tests (API or scenario) serially.
+
+ Former names for this job were:
+ * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
+ * tempest-scenario-multinode-lvm-multibackend
+ timeout: 10800
+ vars:
+ tox_envlist: slow-serial
+ devstack_localrc:
+ CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
+ tempest_concurrency: 2
+ group-vars:
+ # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+ # the controller and subnode prior to Rocky so we have to make sure the
+ # variable is set in both locations.
+ subnode:
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+ name: tempest-slow-py3
+ parent: tempest-slow
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
+
+- job:
+ name: tempest-cinder-v2-api
+ parent: devstack-tempest
+ branches:
+ - master
+ description: |
+ This job runs the cinder API tests against the v2 endpoint.
+ vars:
+ tox_envlist: all
+ tempest_test_regex: api.*volume
+ devstack_localrc:
+ TEMPEST_VOLUME_TYPE: volumev2
+
+- job:
+ name: tempest-pg-full
+ parent: tempest-full-py3
+ description: |
+ Base integration test with Neutron networking and PostgreSQL.
+ Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
+ vars:
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+ DATABASE_TYPE: postgresql
+
+- project-template:
+ name: integrated-gate-networking
+ description: |
+ Run the python3 Tempest network integration tests (Nova and Neutron related)
+ in check and gate for the Neutron integrated gate. This is meant to
+ be run on the Neutron gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-networking
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-networking
+
+- project-template:
+ name: integrated-gate-compute
+ description: |
+ Run the python3 Tempest compute integration tests
+ (Nova, Neutron, Cinder and Glance related) in check and gate
+ for the Nova integrated gate. This is meant to be
+ run on the Nova gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-compute
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-compute
+
+- project-template:
+ name: integrated-gate-placement
+ description: |
+ Run the python3 Tempest placement integration tests
+ (Nova and Neutron related) in check and gate
+ for the Placement integrated gate. This is meant to be
+ run on the Placement gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-placement
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-placement
+
+- project-template:
+ name: integrated-gate-storage
+ description: |
+ Run the python3 Tempest image & block storage integration tests
+ (Cinder, Glance, Swift and Nova related) in check and gate
+ for the Cinder and Glance integrated gate. This is meant to be
+ run on the Cinder and Glance gates only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-storage
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-storage
+
+- project-template:
+ name: integrated-gate-object-storage
+ description: |
+ Run the python3 Tempest object storage integration tests
+ (Swift, Cinder and Glance related) in check and gate
+ for the Swift integrated gate. This is meant to be
+ run on the Swift gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-object-storage
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-object-storage
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
new file mode 100644
index 0000000..f2522af
--- /dev/null
+++ b/zuul.d/project.yaml
@@ -0,0 +1,146 @@
+- project:
+ templates:
+ - check-requirements
+ - integrated-gate-py3
+ - openstack-cover-jobs
+ - openstack-python3-victoria-jobs
+ - publish-openstack-docs-pti
+ - release-notes-jobs-python3
+ check:
+ jobs:
+ - devstack-tempest:
+ files:
+ - ^playbooks/
+ - ^roles/
+ - ^.zuul.yaml$
+ - devstack-tempest-ipv6:
+ voting: false
+ files:
+ - ^playbooks/
+ - ^roles/
+ - ^.zuul.yaml$
+ - tempest-full-parallel:
+ # Define list of irrelevant files to use everywhere else
+ irrelevant-files: &tempest-irrelevant-files
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
+ - ^tools/.*$
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
+ - tempest-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3-ipv6:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - glance-multistore-cinder-import:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-victoria-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-ussuri-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-train-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-stein-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-multinode-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-tox-plugin-sanity-check:
+ irrelevant-files: &tempest-irrelevant-files-2
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
+ # tools/ is not here since this relies on a script in tools/.
+ - tempest-ipv6-only:
+ irrelevant-files: *tempest-irrelevant-files-2
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - nova-live-migration:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - devstack-plugin-ceph-tempest-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-grenade-multinode:
+ irrelevant-files: *tempest-irrelevant-files
+ - grenade:
+ irrelevant-files: *tempest-irrelevant-files
+ - puppet-openstack-integration-4-scenario001-tempest-centos-7:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - puppet-openstack-integration-4-scenario002-tempest-centos-7:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - puppet-openstack-integration-4-scenario003-tempest-centos-7:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - puppet-openstack-integration-4-scenario004-tempest-centos-7:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-tempest-dvr:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - interop-tempest-consistency:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-test-account-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-test-account-no-admin-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - openstack-tox-bashate:
+ irrelevant-files: *tempest-irrelevant-files-2
+ gate:
+ jobs:
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-grenade-multinode:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - grenade:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-ipv6-only:
+ irrelevant-files: *tempest-irrelevant-files-2
+ - devstack-plugin-ceph-tempest-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ experimental:
+ jobs:
+ - tempest-cinder-v2-api:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-all:
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-tempest-dvr-ha-multinode-full:
+ irrelevant-files: *tempest-irrelevant-files
+ - nova-tempest-v2-api:
+ irrelevant-files: *tempest-irrelevant-files
+ - cinder-tempest-lvm-multibackend:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-pg-full:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3-opensuse15:
+ irrelevant-files: *tempest-irrelevant-files
+ periodic-stable:
+ jobs:
+ - tempest-full-victoria-py3
+ - tempest-full-ussuri-py3
+ - tempest-full-train-py3
+ - tempest-full-stein-py3
+ periodic:
+ jobs:
+ - tempest-all
+ - tempest-full-oslo-master
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
new file mode 100644
index 0000000..832a0d5
--- /dev/null
+++ b/zuul.d/stable-jobs.yaml
@@ -0,0 +1,23 @@
+# NOTE(gmann): This file includes all stable release job definitions.
+- job:
+ name: tempest-full-victoria-py3
+ parent: tempest-full-py3
+ override-checkout: stable/victoria
+
+- job:
+ name: tempest-full-ussuri-py3
+ parent: tempest-full-py3
+ nodeset: openstack-single-node-bionic
+ override-checkout: stable/ussuri
+
+- job:
+ name: tempest-full-train-py3
+ parent: tempest-full-py3
+ nodeset: openstack-single-node-bionic
+ override-checkout: stable/train
+
+- job:
+ name: tempest-full-stein-py3
+ parent: tempest-full-py3
+ nodeset: openstack-single-node-bionic
+ override-checkout: stable/stein
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
new file mode 100644
index 0000000..387a94b
--- /dev/null
+++ b/zuul.d/tempest-specific.yaml
@@ -0,0 +1,113 @@
+# NOTE(gmann): This file includes all Tempest-specific job definitions
+# which are supposed to be run by the Tempest gate only.
+- job:
+ name: tempest-full-oslo-master
+ parent: tempest-full-py3
+ description: |
+ Integration test using current git of oslo libs.
+ This ensures that when oslo libs are released they
+ do not break OpenStack server projects.
+
+ Former name for this job was
+ periodic-tempest-dsvm-oslo-latest-full-master.
+ timeout: 10800
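+ # Pull the oslo libs from source so the job runs against their
+ # current master rather than the released versions.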
+ required-projects:
+ - opendev.org/openstack/oslo.cache
+ - opendev.org/openstack/oslo.concurrency
+ - opendev.org/openstack/oslo.config
+ - opendev.org/openstack/oslo.context
+ - opendev.org/openstack/oslo.db
+ - opendev.org/openstack/oslo.i18n
+ - opendev.org/openstack/oslo.log
+ - opendev.org/openstack/oslo.messaging
+ - opendev.org/openstack/oslo.middleware
+ - opendev.org/openstack/oslo.policy
+ - opendev.org/openstack/oslo.privsep
+ - opendev.org/openstack/oslo.reports
+ - opendev.org/openstack/oslo.rootwrap
+ - opendev.org/openstack/oslo.serialization
+ - opendev.org/openstack/oslo.service
+ - opendev.org/openstack/oslo.utils
+ - opendev.org/openstack/oslo.versionedobjects
+ - opendev.org/openstack/oslo.vmware
+
+- job:
+ name: tempest-full-parallel
+ parent: tempest-full-py3
+ voting: false
+ branches:
+ - master
+ description: |
+ Base integration test with Neutron networking.
+ It includes all scenarios, as it did in the past.
+ This job runs all scenario tests in parallel!
+ timeout: 9000
+ vars:
+ tox_envlist: full-parallel
+ run_tempest_cleanup: true
+ run_tempest_dry_cleanup: true
+
+- job:
+ name: tempest-full-py3-ipv6
+ parent: devstack-tempest-ipv6
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Base integration test with Neutron networking, IPv6 and py3.
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+
+- job:
+ name: tempest-full-py3-opensuse15
+ parent: tempest-full-py3
+ nodeset: devstack-single-node-opensuse-15
+ description: |
+ Base integration test with Neutron networking and py36 running
+ on openSUSE Leap 15.x
+ voting: false
+
+- job:
+ name: tempest-tox-plugin-sanity-check
+ parent: tox
+ description: |
+ Run tempest plugin sanity check script using tox.
+ nodeset: ubuntu-focal
+ vars:
+ tox_envlist: plugin-sanity-check
+ timeout: 5000
+
+- job:
+ name: tempest-full-test-account-py3
+ parent: tempest-full-py3
+ description: |
+ This job runs the full set of tempest tests on py3, using
+ pre-provisioned credentials instead of dynamic credentials.
+ Former names for this job were:
+ - legacy-tempest-dsvm-full-test-accounts
+ - legacy-tempest-dsvm-neutron-full-test-accounts
+ - legacy-tempest-dsvm-identity-v3-test-accounts
+ vars:
+ devstack_localrc:
+ TEMPEST_USE_TEST_ACCOUNTS: True
+
+- job:
+ name: tempest-full-test-account-no-admin-py3
+ parent: tempest-full-test-account-py3
+ description: |
+ This job runs the full set of tempest tests on py3, using
+ pre-provisioned credentials and without an admin account.
+ Former name for this job was:
+ - legacy-tempest-dsvm-neutron-full-non-admin
+ vars:
+ devstack_localrc:
+ TEMPEST_HAS_ADMIN: False