Merge "Allow kwargs in create_volume_snapshot"
diff --git a/.zuul.yaml b/.zuul.yaml
deleted file mode 100644
index c20f204..0000000
--- a/.zuul.yaml
+++ /dev/null
@@ -1,778 +0,0 @@
-- job:
- name: devstack-tempest
- parent: devstack
- description: |
- Base Tempest job.
-
- This Tempest job provides the base for both the single and multi-node
- test setup. To run a multi-node test inherit from devstack-tempest and
- set the nodeset to a multi-node one.
- required-projects: &base_required-projects
- - opendev.org/openstack/tempest
- timeout: 7200
- roles: &base_roles
- - zuul: opendev.org/openstack/devstack
- vars: &base_vars
- devstack_services:
- tempest: true
- devstack_local_conf:
- test-config:
- $TEMPEST_CONFIG:
- compute:
- min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
- test_results_stage_name: test_results
- zuul_copy_output:
- '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
- '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
- '{{ devstack_base_dir }}/tempest/tempest.log': logs
- '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
- '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
- '{{ stage_dir }}/stackviz': logs
- extensions_to_txt:
- conf: true
- log: true
- yaml: true
- yml: true
- run: playbooks/devstack-tempest.yaml
- post-run: playbooks/post-tempest.yaml
-
-- job:
- name: tempest-all
- parent: devstack-tempest
- description: |
- Integration test that runs all tests.
- Former name for this job was:
- * legacy-periodic-tempest-dsvm-all-master
- vars:
- tox_envlist: all
- tempest_test_regex: tempest
- devstack_localrc:
- ENABLE_FILE_INJECTION: true
-
-- job:
- name: devstack-tempest-ipv6
- parent: devstack-ipv6
- description: |
- Base Tempest IPv6 job. This job is derived from 'devstack-ipv6'
- which set the IPv6-only setting for OpenStack services. As part of
- run phase, this job will verify the IPv6 setting and check the services
- endpoints and listen addresses are IPv6. Basically it will run the script
- ./tool/verify-ipv6-only-deployments.sh
-
- Child jobs of this job can run their own set of tests and can
- add post-run playebooks to extend the IPv6 verification specific
- to their deployed services.
- Check the wiki page for more details about project jobs setup
- - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
- required-projects: *base_required-projects
- timeout: 7200
- roles: *base_roles
- vars: *base_vars
- run: playbooks/devstack-tempest-ipv6.yaml
- post-run: playbooks/post-tempest.yaml
-
-- job:
- name: tempest-ipv6-only
- parent: devstack-tempest-ipv6
- # This currently works from stable/pike on.
- branches: ^(?!stable/ocata).*$
- description: |
- Integration test of IPv6-only deployments. This job runs
- smoke and IPv6 relates tests only. Basic idea is to test
- whether OpenStack Services listen on IPv6 addrress or not.
- timeout: 10800
- vars:
- tox_envlist: ipv6-only
-
-- job:
- name: tempest-full
- parent: devstack-tempest
- # This currently works from stable/pike on.
- # Before stable/pike, legacy version of tempest-full
- # 'legacy-tempest-dsvm-neutron-full' run.
- branches: ^(?!stable/ocata).*$
- description: |
- Base integration test with Neutron networking and py27.
- This job is supposed to run until stable/train setup only.
- If you are running it on stable/ussuri gate onwards for python2.7
- coverage then you need to do override-checkout with any stable
- branch less than or equal to stable/train.
- Former names for this job where:
- * legacy-tempest-dsvm-neutron-full
- * gate-tempest-dsvm-neutron-full-ubuntu-xenial
- vars:
- tox_envlist: full
- devstack_localrc:
- ENABLE_FILE_INJECTION: true
- ENABLE_VOLUME_MULTIATTACH: true
- USE_PYTHON3: False
- devstack_services:
- # NOTE(mriedem): Disable the cinder-backup service from tempest-full
- # since tempest-full is in the integrated-gate project template but
- # the backup tests do not really involve other services so they should
- # be run in some more cinder-specific job, especially because the
- # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
- c-bak: false
-
-- job:
- name: tempest-full-oslo-master
- parent: tempest-full-py3
- description: |
- Integration test using current git of oslo libs.
- This ensures that when oslo libs get released that they
- do not break OpenStack server projects.
-
- Former name for this job was
- periodic-tempest-dsvm-oslo-latest-full-master.
- timeout: 10800
- required-projects:
- - opendev.org/openstack/oslo.cache
- - opendev.org/openstack/oslo.concurrency
- - opendev.org/openstack/oslo.config
- - opendev.org/openstack/oslo.context
- - opendev.org/openstack/oslo.db
- - opendev.org/openstack/oslo.i18n
- - opendev.org/openstack/oslo.log
- - opendev.org/openstack/oslo.messaging
- - opendev.org/openstack/oslo.middleware
- - opendev.org/openstack/oslo.policy
- - opendev.org/openstack/oslo.privsep
- - opendev.org/openstack/oslo.reports
- - opendev.org/openstack/oslo.rootwrap
- - opendev.org/openstack/oslo.serialization
- - opendev.org/openstack/oslo.service
- - opendev.org/openstack/oslo.utils
- - opendev.org/openstack/oslo.versionedobjects
- - opendev.org/openstack/oslo.vmware
-
-- job:
- name: tempest-full-parallel
- parent: tempest-full-py3
- voting: false
- branches:
- - master
- description: |
- Base integration test with Neutron networking.
- It includes all scenarios as it was in the past.
- This job runs all scenario tests in parallel!
- timeout: 9000
- vars:
- tox_envlist: full-parallel
- run_tempest_cleanup: true
- run_tempest_dry_cleanup: true
-
-- job:
- name: tempest-full-py3
- parent: devstack-tempest
- # This currently works from stable/pike on.
- # Before stable/pike, legacy version of tempest-full
- # 'legacy-tempest-dsvm-neutron-full' run.
- branches: ^(?!stable/ocata).*$
- description: |
- Base integration test with Neutron networking and py3.
- Former names for this job where:
- * legacy-tempest-dsvm-py35
- * gate-tempest-dsvm-py35
- vars:
- tox_envlist: full
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- GLANCE_USE_IMPORT_WORKFLOW: True
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- # without Swift, c-bak cannot run (in the Gate at least)
- # NOTE(mriedem): Disable the cinder-backup service from
- # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
- # project template but the backup tests do not really involve other
- # services so they should be run in some more cinder-specific job,
- # especially because the tests fail at a high rate (see bugs 1483434,
- # 1813217, 1745168)
- c-bak: false
-
-- job:
- name: tempest-integrated-networking
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
- This job runs integration tests for networking. This is subset of
- 'tempest-full-py3' job and run only Neutron and Nova related tests.
- This is meant to be run on neutron gate only.
- vars:
- tox_envlist: integrated-network
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- c-bak: false
-
-- job:
- name: tempest-integrated-compute
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
- This job runs integration tests for compute. This is
- subset of 'tempest-full-py3' job and run Nova, Neutron, Cinder (except backup tests)
- and Glance related tests. This is meant to be run on Nova gate only.
- vars:
- tox_envlist: integrated-compute
- tempest_black_regex: ""
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- c-bak: false
-
-- job:
- name: tempest-integrated-placement
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
- This job runs integration tests for placement. This is
- subset of 'tempest-full-py3' job and run Nova and Neutron
- related tests. This is meant to be run on Placement gate only.
- vars:
- tox_envlist: integrated-placement
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- c-bak: false
-
-- job:
- name: tempest-integrated-storage
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
- This job runs integration tests for image & block storage. This is
- subset of 'tempest-full-py3' job and run Cinder, Glance, Swift and Nova
- related tests. This is meant to be run on Cinder and Glance gate only.
- vars:
- tox_envlist: integrated-storage
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- ENABLE_VOLUME_MULTIATTACH: true
- GLANCE_USE_IMPORT_WORKFLOW: True
-
-- job:
- name: tempest-integrated-object-storage
- parent: devstack-tempest
- branches: ^(?!stable/ocata).*$
- description: |
- This job runs integration tests for object storage. This is
- subset of 'tempest-full-py3' job and run Swift, Cinder and Glance
- related tests. This is meant to be run on Swift gate only.
- vars:
- tox_envlist: integrated-object-storage
- devstack_localrc:
- # NOTE(gmann): swift is not ready on python3 yet and devstack
- # install it on python2.7 only. But settting the USE_PYTHON3
- # for future once swift is ready on py3.
- USE_PYTHON3: true
-
-- job:
- name: tempest-full-py3-ipv6
- parent: devstack-tempest-ipv6
- branches: ^(?!stable/ocata).*$
- description: |
- Base integration test with Neutron networking, IPv6 and py3.
- vars:
- tox_envlist: full
- devstack_localrc:
- USE_PYTHON3: true
- FORCE_CONFIG_DRIVE: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- # without Swift, c-bak cannot run (in the Gate at least)
- c-bak: false
-
-- job:
- name: tempest-multinode-full-base
- parent: devstack-tempest
- description: |
- Base multinode integration test with Neutron networking and py27.
- Former names for this job were:
- * neutron-tempest-multinode-full
- * legacy-tempest-dsvm-neutron-multinode-full
- * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
- This job includes two nodes, controller / tempest plus a subnode, but
- it can be used with different topologies, as long as a controller node
- and a tempest one exist.
- timeout: 10800
- vars:
- tox_envlist: full
- devstack_localrc:
- FORCE_CONFIG_DRIVE: false
- NOVA_ALLOW_MOVE_TO_SAME_HOST: false
- LIVE_MIGRATION_AVAILABLE: true
- USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
- group-vars:
- peers:
- devstack_localrc:
- NOVA_ALLOW_MOVE_TO_SAME_HOST: false
- LIVE_MIGRATION_AVAILABLE: true
- USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
-
-- job:
- name: tempest-multinode-full
- parent: tempest-multinode-full-base
- nodeset: openstack-two-node-focal
- # This job runs on Focal from stable/victoria on.
- branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
- vars:
- devstack_localrc:
- USE_PYTHON3: False
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: False
-
-- job:
- name: tempest-multinode-full
- parent: tempest-multinode-full-base
- nodeset: openstack-two-node-bionic
- # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
- # This job is prepared to make sure all stable branches from stable/stein till stable/train
- # will keep running on bionic. This can be removed once stable/train is EOL.
- branches:
- - stable/stein
- - stable/train
- - stable/ussuri
- vars:
- devstack_localrc:
- USE_PYTHON3: False
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: False
-
-- job:
- name: tempest-multinode-full
- parent: tempest-multinode-full-base
- nodeset: openstack-two-node-xenial
- # This job runs on Xenial and this is for stable/pike, stable/queens
- # and stable/rocky. This job is prepared to make sure all stable branches
- # before stable/stein will keep running on xenial. This job can be
- # removed once stable/rocky is EOL.
- branches:
- - stable/pike
- - stable/queens
- - stable/rocky
- vars:
- devstack_localrc:
- USE_PYTHON3: False
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: False
-
-- job:
- name: tempest-multinode-full-py3
- parent: tempest-multinode-full
- vars:
- devstack_localrc:
- USE_PYTHON3: true
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: true
-
-- job:
- name: tempest-full-py3-opensuse15
- parent: tempest-full-py3
- nodeset: devstack-single-node-opensuse-15
- description: |
- Base integration test with Neutron networking and py36 running
- on openSUSE Leap 15.x
- voting: false
-
-- job:
- name: tempest-slow
- parent: tempest-multinode-full
- description: |
- This multinode integration job will run all the tests tagged as slow.
- It enables the lvm multibackend setup to cover few scenario tests.
- This job will run only slow tests (API or Scenario) serially.
-
- Former names for this job were:
- * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
- * tempest-scenario-multinode-lvm-multibackend
- timeout: 10800
- vars:
- tox_envlist: slow-serial
- devstack_localrc:
- CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
- ENABLE_VOLUME_MULTIATTACH: true
- devstack_plugins:
- neutron: https://opendev.org/openstack/neutron
- devstack_services:
- neutron-placement: true
- neutron-qos: true
- devstack_local_conf:
- post-config:
- "/$NEUTRON_CORE_PLUGIN_CONF":
- ovs:
- bridge_mappings: public:br-ex
- resource_provider_bandwidths: br-ex:1000000:1000000
- test-config:
- $TEMPEST_CONFIG:
- network-feature-enabled:
- qos_placement_physnet: public
- tempest_concurrency: 2
- group-vars:
- # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
- # the controller and subnode prior to Rocky so we have to make sure the
- # variable is set in both locations.
- subnode:
- devstack_localrc:
- ENABLE_VOLUME_MULTIATTACH: true
-
-- job:
- name: tempest-slow-py3
- parent: tempest-slow
- vars:
- devstack_localrc:
- USE_PYTHON3: true
- devstack_services:
- s-account: false
- s-container: false
- s-object: false
- s-proxy: false
- # without Swift, c-bak cannot run (in the Gate at least)
- c-bak: false
- group-vars:
- subnode:
- devstack_localrc:
- USE_PYTHON3: true
-
-- job:
- name: tempest-full-victoria-py3
- parent: tempest-full-py3
- override-checkout: stable/victoria
-
-- job:
- name: tempest-full-ussuri-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-bionic
- override-checkout: stable/ussuri
-
-- job:
- name: tempest-full-train-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-bionic
- override-checkout: stable/train
-
-- job:
- name: tempest-full-stein-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-bionic
- override-checkout: stable/stein
-
-- job:
- name: tempest-tox-plugin-sanity-check
- parent: tox
- description: |
- Run tempest plugin sanity check script using tox.
- nodeset: ubuntu-focal
- vars:
- tox_envlist: plugin-sanity-check
- timeout: 5000
-
-- job:
- name: tempest-cinder-v2-api
- parent: devstack-tempest
- branches:
- - master
- description: |
- This job runs the cinder API test against v2 endpoint.
- vars:
- tox_envlist: all
- tempest_test_regex: api.*volume
- devstack_localrc:
- TEMPEST_VOLUME_TYPE: volumev2
-
-- job:
- name: tempest-full-test-account-py3
- parent: tempest-full-py3
- description: |
- This job runs the full set of tempest tests using pre-provisioned
- credentials instead of dynamic credentials and py3.
- Former names for this job were:
- - legacy-tempest-dsvm-full-test-accounts
- - legacy-tempest-dsvm-neutron-full-test-accounts
- - legacy-tempest-dsvm-identity-v3-test-accounts
- vars:
- devstack_localrc:
- TEMPEST_USE_TEST_ACCOUNTS: True
-
-- job:
- name: tempest-full-test-account-no-admin-py3
- parent: tempest-full-test-account-py3
- description: |
- This job runs the full set of tempest tests using pre-provisioned
- credentials and py3 without having an admin account.
- Former name for this job was:
- - legacy-tempest-dsvm-neutron-full-non-admin
-
- vars:
- devstack_localrc:
- TEMPEST_HAS_ADMIN: False
-
-- job:
- name: tempest-pg-full
- parent: tempest-full-py3
- description: |
- Base integration test with Neutron networking and PostgreSQL.
- Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
- vars:
- devstack_localrc:
- ENABLE_FILE_INJECTION: true
- DATABASE_TYPE: postgresql
-
-- project-template:
- name: integrated-gate-networking
- description: |
- Run the python3 Tempest network integration tests (Nova and Neutron related)
- in check and gate for the neutron integrated gate. This is meant to be
- run on neutron gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-networking
- gate:
- jobs:
- - grenade
- - tempest-integrated-networking
-
-- project-template:
- name: integrated-gate-compute
- description: |
- Run the python3 Tempest compute integration tests
- (Nova, Neutron, Cinder and Glance related) in check and gate
- for the Nova integrated gate. This is meant to be
- run on Nova gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-compute
- gate:
- jobs:
- - grenade
- - tempest-integrated-compute
-
-- project-template:
- name: integrated-gate-placement
- description: |
- Run the python3 Tempest placement integration tests
- (Nova and Neutron related) in check and gate
- for the Placement integrated gate. This is meant to be
- run on Placement gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-placement
- gate:
- jobs:
- - grenade
- - tempest-integrated-placement
-
-- project-template:
- name: integrated-gate-storage
- description: |
- Run the python3 Tempest image & block storage integration tests
- (Cinder, Glance, Swift and Nova related) in check and gate
- for the neutron integrated gate. This is meant to be
- run on Cinder and Glance gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-storage
- gate:
- jobs:
- - grenade
- - tempest-integrated-storage
-
-- project-template:
- name: integrated-gate-object-storage
- description: |
- Run the python3 Tempest object storage integration tests
- (Swift, Cinder and Glance related) in check and gate
- for the swift integrated gate. This is meant to be
- run on swift gate only.
- check:
- jobs:
- - grenade
- - tempest-integrated-object-storage
- gate:
- jobs:
- - grenade
- - tempest-integrated-object-storage
-
-- project:
- templates:
- - check-requirements
- - integrated-gate-py3
- - openstack-cover-jobs
- - openstack-python3-victoria-jobs
- - publish-openstack-docs-pti
- - release-notes-jobs-python3
- check:
- jobs:
- - devstack-tempest:
- files:
- - ^playbooks/
- - ^roles/
- - ^.zuul.yaml$
- - devstack-tempest-ipv6:
- voting: false
- files:
- - ^playbooks/
- - ^roles/
- - ^.zuul.yaml$
- - tempest-full-parallel:
- # Define list of irrelevant files to use everywhere else
- irrelevant-files: &tempest-irrelevant-files
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^tools/.*$
- - ^.coveragerc$
- - ^.gitignore$
- - ^.gitreview$
- - ^.mailmap$
- - tempest-full-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3-ipv6:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - glance-multistore-cinder-import:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-victoria-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-ussuri-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-train-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-stein-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-multinode-full-py3:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-tox-plugin-sanity-check:
- irrelevant-files: &tempest-irrelevant-files-2
- - ^.*\.rst$
- - ^doc/.*$
- - ^etc/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tempest/hacking/.*$
- - ^tempest/tests/.*$
- - ^.coveragerc$
- - ^.gitignore$
- - ^.gitreview$
- - ^.mailmap$
- # tools/ is not here since this relies on a script in tools/.
- - tempest-ipv6-only:
- irrelevant-files: *tempest-irrelevant-files-2
- - tempest-slow-py3:
- irrelevant-files: *tempest-irrelevant-files
- - nova-live-migration:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - devstack-plugin-ceph-tempest-py3:
- irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade-multinode:
- irrelevant-files: *tempest-irrelevant-files
- - grenade:
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario001-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario002-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario003-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - puppet-openstack-integration-4-scenario004-tempest-centos-7:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - neutron-tempest-dvr:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - interop-tempest-consistency:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-test-account-py3:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-test-account-no-admin-py3:
- voting: false
- irrelevant-files: *tempest-irrelevant-files
- - openstack-tox-bashate:
- irrelevant-files: *tempest-irrelevant-files-2
- gate:
- jobs:
- - tempest-slow-py3:
- irrelevant-files: *tempest-irrelevant-files
- - neutron-grenade-multinode:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3:
- irrelevant-files: *tempest-irrelevant-files
- - grenade:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-ipv6-only:
- irrelevant-files: *tempest-irrelevant-files-2
- - devstack-plugin-ceph-tempest-py3:
- irrelevant-files: *tempest-irrelevant-files
- experimental:
- jobs:
- - tempest-cinder-v2-api:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-all:
- irrelevant-files: *tempest-irrelevant-files
- - neutron-tempest-dvr-ha-multinode-full:
- irrelevant-files: *tempest-irrelevant-files
- - nova-tempest-v2-api:
- irrelevant-files: *tempest-irrelevant-files
- - cinder-tempest-lvm-multibackend:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-pg-full:
- irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3-opensuse15:
- irrelevant-files: *tempest-irrelevant-files
- periodic-stable:
- jobs:
- - tempest-full-victoria-py3
- - tempest-full-ussuri-py3
- - tempest-full-train-py3
- - tempest-full-stein-py3
- periodic:
- jobs:
- - tempest-all
- - tempest-full-oslo-master
diff --git a/REVIEWING.rst b/REVIEWING.rst
index e07e358..4c63aa0 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -160,13 +160,11 @@
When to approve
---------------
* It's OK to hold off on an approval until a subject matter expert reviews it.
-* Every patch needs two +2's before being approved.
-* However, a single Tempest core reviewer can approve patches without waiting
- for another +2 in the following cases:
+* Every patch needs at least a single +2 before being approved. A single
+  Tempest core reviewer can approve patches but can always wait for another
+  +2 in any case. The following are cases where a single +2 can be used
+  without any issue:
- * If a patch has already been approved but requires a trivial rebase to
- merge, then there is no need to wait for a second +2, since the patch has
- already had two +2's.
* If any trivial patch set fixes one of the items below:
* Documentation or code comment typo
@@ -187,7 +185,4 @@
voting ``tempest-tox-plugin-sanity-check`` job) and unblock the
tempest gate
- Note that such a policy should be used judiciously, as we should strive to
- have two +2's on each patch set, prior to approval.
-
.. _example: https://review.opendev.org/#/c/611032/
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
index 9c79a1f..62953ff 100644
--- a/doc/source/contributor/contributing.rst
+++ b/doc/source/contributor/contributing.rst
@@ -43,10 +43,9 @@
Getting Your Patch Merged
~~~~~~~~~~~~~~~~~~~~~~~~~
-All changes proposed to the Tempest require two ``Code-Review +2`` votes from
-Tempest core reviewers before one of the core reviewers can approve the patch by
-giving ``Workflow +1`` vote. More detailed guidelines for reviewers are available
-at :doc:`../REVIEWING`.
+All changes proposed to Tempest require a single ``Code-Review +2`` vote from
+a Tempest core reviewer, given along with a ``Workflow +1`` vote. More detailed
+guidelines for reviewers are available at :doc:`../REVIEWING`.
Project Team Lead Duties
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/data/tempest-blacklisted-plugins-registry.header b/doc/source/data/tempest-non-active-plugins-registry.header
similarity index 67%
rename from doc/source/data/tempest-blacklisted-plugins-registry.header
rename to doc/source/data/tempest-non-active-plugins-registry.header
index 6b6af11..06d8eaa 100644
--- a/doc/source/data/tempest-blacklisted-plugins-registry.header
+++ b/doc/source/data/tempest-non-active-plugins-registry.header
@@ -1,7 +1,7 @@
-Blacklisted Plugins
+Non Active Plugins
===================
List of Tempest plugin projects that are stale or unmaintained for a long
-time (6 months or more). They can be moved out of blacklist state once one
+time (6 months or more). They can be moved out of the non-active list once one
of the relevant patches gets merged:
https://review.opendev.org/#/q/topic:tempest-sanity-gate+%28status:open%29
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index c7004dd..06062c2 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -126,16 +126,16 @@
.. code-block:: python
- class BaseTestCase1(api_version_utils.BaseMicroversionTest):
+ class BaseTestCase1(api_version_utils.BaseMicroversionTest):
- [..]
- @classmethod
- def skip_checks(cls):
- super(BaseTestCase1, cls).skip_checks()
- api_version_utils.check_skip_with_microversion(cls.min_microversion,
- cls.max_microversion,
- CONF.compute.min_microversion,
- CONF.compute.max_microversion)
+ [..]
+ @classmethod
+ def skip_checks(cls):
+ super(BaseTestCase1, cls).skip_checks()
+ api_version_utils.check_skip_with_microversion(cls.min_microversion,
+ cls.max_microversion,
+ CONF.compute.min_microversion,
+ CONF.compute.max_microversion)
Skip logic can be added in tests base class or any specific test class depends on
tests class structure.
@@ -302,6 +302,10 @@
.. _2.2: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id2
+ * `2.3`_
+
+ .. _2.3: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-kilo
+
* `2.6`_
.. _2.6: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id5
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index e51b90b..2eaf72f 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -113,7 +113,7 @@
There is also the option to use `stestr`_ directly. For example, from
the workspace dir run::
- $ stestr run --black-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
+ $ stestr run --exclude-regex '\[.*\bslow\b.*\]' '^tempest\.(api|scenario)'
will run the same set of tests as the default gate jobs. Or you can
use `unittest`_ compatible test runners such as `stestr`_, `pytest`_ etc.
diff --git a/doc/source/plugins/plugin.rst b/doc/source/plugins/plugin.rst
index ab1b0b1..6726def 100644
--- a/doc/source/plugins/plugin.rst
+++ b/doc/source/plugins/plugin.rst
@@ -268,12 +268,12 @@
class MyAPIClient(rest_client.RestClient):
- def __init__(self, auth_provider, service, region,
- my_arg, my_arg2=True, **kwargs):
- super(MyAPIClient, self).__init__(
- auth_provider, service, region, **kwargs)
- self.my_arg = my_arg
- self.my_args2 = my_arg
+ def __init__(self, auth_provider, service, region,
+ my_arg, my_arg2=True, **kwargs):
+ super(MyAPIClient, self).__init__(
+ auth_provider, service, region, **kwargs)
+ self.my_arg = my_arg
+ self.my_args2 = my_arg
Finally the service client should be structured in a python module, so that all
service client classes are importable from it. Each major API version should
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
index 87e3ad1..9c2d1ed 100644
--- a/doc/source/stable_branch_support_policy.rst
+++ b/doc/source/stable_branch_support_policy.rst
@@ -20,7 +20,7 @@
testing branches in these phases, it's possible that we'll introduce changes to
Tempest on master which will break support on *Extended Maintenance* phase
branches. When this happens the expectation for those branches is to either
-switch to running Tempest from a tag with support for the branch, or blacklist
+switch to running Tempest from a tag with support for the branch, or exclude
a newly introduced test (if that is the cause of the issue). Tempest will not
be creating stable branches to support *Extended Maintenance* phase branches, as
the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 388b4cd..4ca7f0d 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,9 +9,9 @@
Tempest master supports the below OpenStack Releases:
+* Victoria
* Ussuri
* Train
-* Stein
For older OpenStack Release:
@@ -34,3 +34,4 @@
* Python 3.6
* Python 3.7
+* Python 3.8
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 0a29b7b..34df089 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -76,54 +76,54 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def skip_checks(cls):
- """This section is used to evaluate config early and skip all test
- methods based on these checks
- """
- super(TestExampleCase, cls).skip_checks()
- if not CONF.section.foo
- cls.skip('A helpful message')
+ @classmethod
+ def skip_checks(cls):
+ """This section is used to evaluate config early and skip all test
+ methods based on these checks
+ """
+ super(TestExampleCase, cls).skip_checks()
+ if not CONF.section.foo
+ cls.skip('A helpful message')
- @classmethod
- def setup_credentials(cls):
- """This section is used to do any manual credential allocation and also
- in the case of dynamic credentials to override the default network
- resource creation/auto allocation
- """
- # This call is used to tell the credential allocator to not create any
- # network resources for this test case. It also enables selective
- # creation of other neutron resources. NOTE: it must go before the
- # super call
- cls.set_network_resources()
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ """This section is used to do any manual credential allocation and also
+ in the case of dynamic credentials to override the default network
+ resource creation/auto allocation
+ """
+ # This call is used to tell the credential allocator to not create any
+ # network resources for this test case. It also enables selective
+ # creation of other neutron resources. NOTE: it must go before the
+ # super call
+ cls.set_network_resources()
+ super(TestExampleCase, cls).setup_credentials()
- @classmethod
- def setup_clients(cls):
- """This section is used to setup client aliases from the manager object
- or to initialize any additional clients. Except in a few very
- specific situations you should not need to use this.
- """
- super(TestExampleCase, cls).setup_clients()
- cls.servers_client = cls.os_primary.servers_client
+ @classmethod
+ def setup_clients(cls):
+ """This section is used to setup client aliases from the manager object
+ or to initialize any additional clients. Except in a few very
+ specific situations you should not need to use this.
+ """
+ super(TestExampleCase, cls).setup_clients()
+ cls.servers_client = cls.os_primary.servers_client
- @classmethod
- def resource_setup(cls):
- """This section is used to create any resources or objects which are
- going to be used and shared by **all** test methods in the
- TestCase. Note then anything created in this section must also be
- destroyed in the corresponding resource_cleanup() method (which will
- be run during tearDownClass())
- """
- super(TestExampleCase, cls).resource_setup()
- cls.shared_server = cls.servers_client.create_server(...)
- cls.addClassResourceCleanup(waiters.wait_for_server_termination,
- cls.servers_client,
- cls.shared_server['id'])
- cls.addClassResourceCleanup(
- test_utils.call_and_ignore_notfound_exc(
- cls.servers_client.delete_server,
- cls.shared_server['id']))
+ @classmethod
+ def resource_setup(cls):
+ """This section is used to create any resources or objects which are
+ going to be used and shared by **all** test methods in the
+ TestCase. Note then anything created in this section must also be
+ destroyed in the corresponding resource_cleanup() method (which will
+ be run during tearDownClass())
+ """
+ super(TestExampleCase, cls).resource_setup()
+ cls.shared_server = cls.servers_client.create_server(...)
+ cls.addClassResourceCleanup(waiters.wait_for_server_termination,
+ cls.servers_client,
+ cls.shared_server['id'])
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc(
+ cls.servers_client.delete_server,
+ cls.shared_server['id']))
.. _credentials:
@@ -150,9 +150,9 @@
credentials = ['primary', 'admin']
- @classmethod
- def skip_checks(cls):
- ...
+ @classmethod
+ def skip_checks(cls):
+ ...
In this example the ``TestExampleAdmin`` TestCase will allocate 2 sets of
credentials, one regular user and one admin user. The corresponding manager
@@ -225,10 +225,10 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources(network=True, subnet=True, router=False)
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(network=True, subnet=True, router=False)
+ super(TestExampleCase, cls).setup_credentials()
There are 2 quirks with the usage here. First for the set_network_resources
function to work properly it **must be called before super()**. This is so
@@ -242,10 +242,10 @@
class TestExampleCase(test.BaseTestCase):
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources()
- super(TestExampleCase, cls).setup_credentials()
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources()
+ super(TestExampleCase, cls).setup_credentials()
This will not allocate any networking resources. This is because by default all
the arguments default to False.
@@ -282,8 +282,8 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- self.os_primary.servers_client.create_server(...)
+ def test_example_create_server(self):
+ self.os_primary.servers_client.create_server(...)
is all you need to do. As described previously, in the above example the
``self.os_primary`` is created automatically because the base test class sets the
@@ -305,8 +305,8 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- credentials = self.os_primary.credentials
+ def test_example_create_server(self):
+ credentials = self.os_primary.credentials
The credentials object provides access to all of the credential information you
would need to make API requests. For example, building off the previous
@@ -316,9 +316,9 @@
class TestExampleCase(test.BaseTestCase):
- def test_example_create_server(self):
- credentials = self.os_primary.credentials
- username = credentials.username
- user_id = credentials.user_id
- password = credentials.password
- tenant_id = credentials.tenant_id
+ def test_example_create_server(self):
+ credentials = self.os_primary.credentials
+ username = credentials.username
+ user_id = credentials.user_id
+ password = credentials.password
+ tenant_id = credentials.tenant_id
diff --git a/etc/whitelist.yaml b/etc/allow-list.yaml
similarity index 100%
rename from etc/whitelist.yaml
rename to etc/allow-list.yaml
diff --git a/etc/rbac-persona-accounts.yaml.sample b/etc/rbac-persona-accounts.yaml.sample
new file mode 100644
index 0000000..0b59538
--- /dev/null
+++ b/etc/rbac-persona-accounts.yaml.sample
@@ -0,0 +1,108 @@
+- user_domain_name: Default
+ password: password
+ roles:
+ - admin
+ username: tempest-system-admin-1
+ system: all
+- user_domain_name: Default
+ password: password
+ username: tempest-system-member-1
+ roles:
+ - member
+ system: all
+- user_domain_name: Default
+ password: password
+ username: tempest-system-reader-1
+ roles:
+ - reader
+ system: all
+- user_domain_name: Default
+ password: password
+ domain_name: tempest-test-domain
+ username: tempest-domain-admin-1
+ roles:
+ - admin
+- user_domain_name: Default
+ password: password
+ domain_name: tempest-test-domain
+ username: tempest-domain-member-1
+ roles:
+ - member
+- user_domain_name: Default
+ password: password
+ domain_name: tempest-test-domain
+ username: tempest-domain-reader-1
+ roles:
+ - reader
+- user_domain_name: Default
+ password: password
+ project_name: tempest-test-project
+ username: tempest-project-admin-1
+ roles:
+ - admin
+- user_domain_name: Default
+ password: password
+ project_name: tempest-test-project
+ username: tempest-project-member-1
+ roles:
+ - member
+- user_domain_name: Default
+ password: password
+ project_name: tempest-test-project
+ username: tempest-project-reader-1
+ roles:
+ - reader
+- user_domain_name: Default
+ password: password
+ username: tempest-system-admin-2
+ roles:
+ - admin
+ system: all
+- user_domain_name: Default
+ password: password
+ username: tempest-system-member-2
+ roles:
+ - member
+ system: all
+- user_domain_name: Default
+ password: password
+ system: all
+ username: tempest-system-reader-2
+ roles:
+ - reader
+- user_domain_name: Default
+ password: password
+ domain_name: tempest-test-domain
+ username: tempest-domain-admin-2
+ roles:
+ - admin
+- user_domain_name: Default
+ password: password
+ domain_name: tempest-test-domain
+ username: tempest-domain-member-2
+ roles:
+ - member
+- user_domain_name: Default
+ password: password
+ domain_name: tempest-test-domain
+ username: tempest-domain-reader-2
+ roles:
+ - reader
+- user_domain_name: Default
+ password: password
+ project_name: tempest-test-project
+ username: tempest-project-admin-2
+ roles:
+ - admin
+- user_domain_name: Default
+ password: password
+ project_name: tempest-test-project
+ username: tempest-project-member-2
+ roles:
+ - member
+- user_domain_name: Default
+ password: password
+ project_name: tempest-test-project
+ username: tempest-project-reader-2
+ roles:
+ - reader
diff --git a/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml b/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml
new file mode 100644
index 0000000..33df7c4
--- /dev/null
+++ b/releasenotes/notes/Add-keystone-v3-OS_FEDERATION-APIs-as-tempest-clients-fe9e10a0fe5f09d4.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ The following tempest clients for keystone v3 OS_FEDERATION API were
+ implemented in this release
+
+ * identity_providers
+ * protocols
+ * mappings
+ * service_providers
diff --git a/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml b/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml
new file mode 100644
index 0000000..089569e
--- /dev/null
+++ b/releasenotes/notes/Inclusive-jargon-17621346744f0cf4.yaml
@@ -0,0 +1,13 @@
+---
+deprecations:
+ - |
+ In this release the following tempest arguments are deprecated and
+ replaced by new ones which are functionally equivalent:
+
+ * --black-regex is replaced by --exclude-regex
+ * --blacklist-file is replaced by --exclude-list
+ * --whitelist-file is replaced by --include-list
+
+ For now Tempest supports both (new and old ones) in order to make the
+ transition for all consumers smoother. However, that's just a temporary
+ case and the old options will be removed soon.
diff --git a/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml b/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml
new file mode 100644
index 0000000..822df7d
--- /dev/null
+++ b/releasenotes/notes/Remove-manager-2e0b0af48f01294a.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ In this release tempest/manager.py is removed after more than 4 years
+ of deprecation.
diff --git a/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
new file mode 100644
index 0000000..121e060
--- /dev/null
+++ b/releasenotes/notes/add-compute-feature-shelve-migrate-fdbd3633abe65c4e.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+    Add a new config option ``[compute-feature-enabled] shelve_migrate``
+    which enables tests for environments that support cold migration of
+    qcow2 unshelved instances.
diff --git a/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml b/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml
new file mode 100644
index 0000000..1840c10
--- /dev/null
+++ b/releasenotes/notes/add-identity-roles-system-methods-519dc144231993a3.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Added methods to the identity v3 roles client to support:
+
+ - PUT /v3/system/users/{user}/roles/{role}
+ - GET /v3/system/users/{user}/roles
+ - GET /v3/system/users/{user}/roles/{role}
+ - DELETE /v3/system/users/{user}/roles/{role}
+ - PUT /v3/system/groups/{group}/roles/{role}
+ - GET /v3/system/groups/{group}/roles
+ - GET /v3/system/groups/{group}/roles/{role}
+ - DELETE /v3/system/groups/{group}/roles/{role}
diff --git a/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
new file mode 100644
index 0000000..8e42e85
--- /dev/null
+++ b/releasenotes/notes/associate-disassociate-floating_ip-0b6cfebeef1304b0.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added associate_floating_ip() and dissociate_floating_ip() methods
+ to the scenario manager.
diff --git a/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml b/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml
new file mode 100644
index 0000000..e53411d
--- /dev/null
+++ b/releasenotes/notes/create_loginable_secgroup_rule-73722fd4b4eb12d0.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added public interface create_loginable_secgroup_rule().
+    Since this interface is meant to be used by tempest plugins,
+    it doesn't necessarily need to be a private API.
diff --git a/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml b/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml
new file mode 100644
index 0000000..3354f65
--- /dev/null
+++ b/releasenotes/notes/create_security_group_rule-16d58a8f0f0ff262.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added public interface create_security_group_rule().
+    Since this interface is meant to be used by tempest plugins,
+    it doesn't necessarily need to be a private API.
diff --git a/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
new file mode 100644
index 0000000..fd7a874
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-stein-f795b968d83497a9.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+ This is an intermediate release during the Wallaby development cycle to
+ mark the end of support for EM Stein release in Tempest.
+ After this release, Tempest will support below OpenStack Releases:
+
+ * Victoria
+ * Ussuri
+ * Train
+
+ Current development of Tempest is for OpenStack Wallaby development
+ cycle.
diff --git a/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml b/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml
new file mode 100644
index 0000000..2779b26
--- /dev/null
+++ b/releasenotes/notes/log_console_output-dae6b8740b5a5821.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Added public interface log_console_output().
+ It used to be a private method with name _log_console_output().
+    Since this interface is meant to be used by tempest plugins,
+    it doesn't necessarily need to be a private API.
+
diff --git a/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml b/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml
new file mode 100644
index 0000000..ff406fb
--- /dev/null
+++ b/releasenotes/notes/merge-tempest-horizon-plugin-39d555339ab8c7ce.yaml
@@ -0,0 +1,6 @@
+---
+prelude: >
+ The integrated horizon dashboard test is now moved
+ from tempest-horizon plugin into Tempest. You do not need
+ to install tempest-horizon to run the horizon test which
+ can be run using Tempest itself.
diff --git a/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml b/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml
new file mode 100644
index 0000000..42322e4
--- /dev/null
+++ b/releasenotes/notes/random-bytes-size-limit-ee94a8c6534fe916.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+ The ``tempest.lib.common.utils.data_utils.random_bytes()`` helper
+ function will no longer allow a ``size`` of more than 1MiB. Tests
+ generally do not need to generate and use large payloads for
+ feature verification and it is easy to lose track of and duplicate
+ large buffers. The sum total of such errors can become problematic
+    in parallelized and constrained CI environments.
diff --git a/releasenotes/notes/system-scope-44244cc955a7825f.yaml b/releasenotes/notes/system-scope-44244cc955a7825f.yaml
new file mode 100644
index 0000000..969a71f
--- /dev/null
+++ b/releasenotes/notes/system-scope-44244cc955a7825f.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds new personas that can be used to test service policies for all
+ default scopes (project, domain, and system) and roles (reader, member,
+ and admin). Both dynamic credentials and pre-provisioned credentials are
+ supported.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index d8702f9..21f414e 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ v26.0.0
v24.0.0
v23.0.0
v22.1.0
diff --git a/releasenotes/source/v26.0.0.rst b/releasenotes/source/v26.0.0.rst
new file mode 100644
index 0000000..4161f89
--- /dev/null
+++ b/releasenotes/source/v26.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v26.0.0 Release Notes
+=====================
+.. release-notes:: 26.0.0 Release Notes
+ :version: 26.0.0
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 3643edb..f9fcf28 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -32,7 +32,11 @@
.. zuul:rolevar:: tempest_test_blacklist
- Specifies a blacklist file to skip tests that are not needed.
+ DEPRECATED option, please use tempest_test_exclude_list instead.
+
+.. zuul:rolevar:: tempest_test_exclude_list
+
+   Specifies an exclude list file to skip tests that are not needed.
Pass a full path to the file.
@@ -44,6 +48,11 @@
.. zuul:rolevar:: tempest_black_regex
:default: ''
+ DEPRECATED option, please use tempest_exclude_regex instead.
+
+.. zuul:rolevar:: tempest_exclude_regex
+ :default: ''
+
A regular expression used to skip the tests.
It works only when used with some specific tox environments
@@ -51,7 +60,7 @@
::
vars:
- tempest_black_regex: (tempest.api.identity).*$
+ tempest_exclude_regex: (tempest.api.identity).*$
.. zuul:rolevar:: tox_extra_args
:default: ''
diff --git a/roles/run-tempest/defaults/main.yaml b/roles/run-tempest/defaults/main.yaml
index 5867b6c..e7a1cc6 100644
--- a/roles/run-tempest/defaults/main.yaml
+++ b/roles/run-tempest/defaults/main.yaml
@@ -1,7 +1,10 @@
devstack_base_dir: /opt/stack
tempest_test_regex: ''
tox_envlist: smoke
+# TODO(kopecmartin) remove tempest_black_regex once all consumers of this
+# role have switched to the tempest_exclude_regex option.
tempest_black_regex: ''
+tempest_exclude_regex: ''
tox_extra_args: ''
tempest_test_timeout: ''
stable_constraints_file: "{{ devstack_base_dir }}/requirements/upper-constraints.txt"
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 1de3725..e9c8e92 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -36,6 +36,9 @@
tempest_tox_environment: "{{ tempest_tox_environment | combine({'OS_TEST_TIMEOUT': tempest_test_timeout}) }}"
when: tempest_test_timeout != ''
+# TODO(kopecmartin) remove the following 'when block' after all consumers of
+# the role have switched to tempest_test_exclude_list option, until then it's
+# kept here for backward compatibility
- when:
- tempest_test_blacklist is defined
block:
@@ -50,10 +53,48 @@
blacklist_option: "--blacklist-file={{ tempest_test_blacklist|quote }}"
when: blacklist_stat.stat.exists
+- when:
+ - tempest_test_exclude_list is defined
+ block:
+ - name: Check for test exclude list file
+ stat:
+ path: "{{ tempest_test_exclude_list }}"
+ register:
+ exclude_list_stat
+
+ - name: Build exclude list option
+ set_fact:
+ exclude_list_option: "--exclude-list={{ tempest_test_exclude_list|quote }}"
+ when: exclude_list_stat.stat.exists
+
+# TODO(kopecmartin) remove this after all consumers of the role have switched
+# to tempest_exclude_regex option, until then it's kept here for the backward
+# compatibility
+- name: Set tempest_exclude_regex
+ set_fact:
+ tempest_exclude_regex: "{{ tempest_black_regex }}"
+ when:
+ - tempest_black_regex is defined
+ - tempest_exclude_regex is not defined
+
+- name: Build exclude regex (old param)
+ set_fact:
+ tempest_exclude_regex: "--black-regex={{tempest_black_regex|quote}}"
+ when:
+ - tempest_black_regex is defined
+
+- name: Build exclude regex (new param)
+ set_fact:
+ tempest_exclude_regex: "--exclude-regex={{tempest_exclude_regex|quote}}"
+ when:
+ - tempest_black_regex is not defined
+ - tempest_exclude_regex is defined
+
- name: Run Tempest
- command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} {{blacklist_option|default('')}} \
+ command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} \
+ {{blacklist_option|default('')}} {{exclude_list_option|default('')}} \
--concurrency={{tempest_concurrency|default(default_concurrency)}} \
- --black-regex={{tempest_black_regex|quote}}
+ {{tempest_exclude_regex|default('')}}
args:
chdir: "{{devstack_base_dir}}/tempest"
register: tempest_run_result
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 487337e..342380e 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -61,6 +61,8 @@
}
create_dict.update(kwargs)
new_image = self.image_client.create_image(**create_dict)
+ self.addCleanup(self.image_client.wait_for_resource_deletion,
+ new_image['id'])
self.addCleanup(self.image_client.delete_image, new_image['id'])
self.image_client.store_image_file(new_image['id'], image_file)
@@ -86,6 +88,18 @@
server = self.create_test_server(image_id=custom_img,
config_drive=True,
wait_until='ACTIVE')
+
+        # NOTE(lyarwood): self.create_test_server deletes the server
+ # at class level cleanup so add server cleanup to ensure that
+ # the instance is deleted first before created image. This
+        # avoids failures when the rbd backend is used for both
+ # Glance and Nova ephemeral storage. Also wait until server is
+ # deleted otherwise image deletion can start before server is
+ # deleted.
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server['id'])
+ self.addCleanup(self.servers_client.delete_server, server['id'])
+
volume = self.create_volume()
attachment = self.attach_volume(server, volume)
waiters.wait_for_volume_resource_status(
@@ -98,7 +112,5 @@
server['id'], attachment['volumeId'])
waiters.wait_for_volume_resource_status(
self.volumes_client, attachment['volumeId'], 'available')
- volume_after_detach = self.servers_client.list_volume_attachments(
- server['id'])['volumeAttachments']
- self.assertEqual(0, len(volume_after_detach),
- "Failed to detach volume")
+ waiters.wait_for_volume_attachment_remove_from_server(
+ self.servers_client, server['id'], attachment['volumeId'])
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 0296220..275a26f 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -110,20 +110,30 @@
Creating another server image when first image is being saved is
not allowed.
"""
- # Create first snapshot
- image = self.create_image_from_server(self.server_id)
- self.addCleanup(self._reset_server)
+ try:
+ # Create first snapshot
+ image = self.create_image_from_server(self.server_id)
+ self.addCleanup(self._reset_server)
- # Create second snapshot
- self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
- self.server_id)
+ # Create second snapshot
+ self.assertRaises(lib_exc.Conflict, self.create_image_from_server,
+ self.server_id)
- if api_version_utils.compare_version_header_to_response(
- "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
- image_id = image['image_id']
- else:
- image_id = data_utils.parse_image_id(image.response['location'])
- self.client.delete_image(image_id)
+ if api_version_utils.compare_version_header_to_response(
+ "OpenStack-API-Version", "compute 2.45", image.response, "lt"):
+ image_id = image['image_id']
+ else:
+ image_id = data_utils.parse_image_id(
+ image.response['location'])
+ self.client.delete_image(image_id)
+
+ except lib_exc.TimeoutException as ex:
+ # Test cannot capture the image saving state.
+ # If timeout is reached, we don't need to check state,
+            # since, it wouldn't be a 'SAVING' state at least and apart from
+ # it, this testcase doesn't have scope for other state transition
+ # Hence, skip the test.
+ raise self.skipException("This test is skipped because " + str(ex))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('084f0cbc-500a-4963-8a4e-312905862581')
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 0601bbe..102792e 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -427,3 +427,33 @@
CONF.compute.build_interval, original_ip_count):
raise lib_exc.TimeoutException(
'Timed out while waiting for IP count to decrease.')
+
+
+class AttachInterfacesV270Test(AttachInterfacesTestBase):
+ """Test interface API with microversion greater than 2.69"""
+ min_microversion = '2.70'
+
+ @decorators.idempotent_id('2853f095-8277-4067-92bd-9f10bd4f8e0c')
+ @utils.services('network')
+ def test_create_get_list_interfaces(self):
+ """Test interface API with microversion greater than 2.69
+
+ Checking create, get, list interface APIs response schema.
+ """
+ server = self.create_test_server(wait_until='ACTIVE')
+ try:
+ iface = self.interfaces_client.create_interface(server['id'])[
+ 'interfaceAttachment']
+ iface = waiters.wait_for_interface_status(
+ self.interfaces_client, server['id'], iface['port_id'],
+ 'ACTIVE')
+ except lib_exc.BadRequest as e:
+ msg = ('Multiple possible networks found, use a Network ID to be '
+ 'more specific.')
+ if not CONF.compute.fixed_network_name and six.text_type(e) == msg:
+ raise
+ else:
+ # just to check the response schema
+ self.interfaces_client.show_interface(
+ server['id'], iface['port_id'])
+ self.interfaces_client.list_interfaces(server['id'])
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 7931ca9..6ebdbdb 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -26,11 +26,6 @@
CONF = config.CONF
-if six.PY2:
- ord_func = ord
-else:
- ord_func = int
-
class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
"""Test novnc console"""
@@ -116,14 +111,14 @@
# single word(4 bytes).
self.assertEqual(
data_length, 4, 'Expected authentication type None.')
- self.assertIn(1, [ord_func(data[i]) for i in (0, 3)],
+ self.assertIn(1, [int(data[i]) for i in (0, 3)],
'Expected authentication type None.')
else:
self.assertGreaterEqual(
len(data), 2, 'Expected authentication type None.')
self.assertIn(
1,
- [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
+ [int(data[i + 1]) for i in range(int(data[0]))],
'Expected authentication type None.')
# Send to the server that we only support authentication
# type None
@@ -136,7 +131,7 @@
len(data), 4,
'Server did not think security was successful.')
self.assertEqual(
- [ord_func(i) for i in data], [0, 0, 0, 0],
+ [int(i) for i in data], [0, 0, 0, 0],
'Server did not think security was successful.')
# Say to leave the desktop as shared as part of client initialization
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index d85e4f7..7251e36 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -200,6 +200,10 @@
super(AttachVolumeShelveTestJSON, cls).skip_checks()
if not CONF.compute_feature_enabled.shelve:
raise cls.skipException('Shelve is not available.')
+ if CONF.compute.compute_volume_common_az:
+ # assuming cross_az_attach is set to false in nova.conf
+ # per the compute_volume_common_az option description
+ raise cls.skipException('Cross AZ attach not available.')
def _count_volumes(self, server, validation_resources):
# Count number of volumes on an instance
diff --git a/tempest/api/identity/admin/v3/test_roles.py b/tempest/api/identity/admin/v3/test_roles.py
index dd7d5af..e5137f4 100644
--- a/tempest/api/identity/admin/v3/test_roles.py
+++ b/tempest/api/identity/admin/v3/test_roles.py
@@ -142,6 +142,26 @@
self.roles_client.delete_role_from_user_on_domain(
self.domain['id'], self.user_body['id'], self.role['id'])
+ @testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
+ 'Skipped because environment has an immutable user '
+ 'source and solely provides read-only access to users.')
+ @decorators.idempotent_id('e5a81737-d294-424d-8189-8664858aae4c')
+ def test_grant_list_revoke_role_to_user_on_system(self):
+ self.roles_client.create_user_role_on_system(
+ self.user_body['id'], self.role['id'])
+
+ roles = self.roles_client.list_user_roles_on_system(
+ self.user_body['id'])['roles']
+
+ self.assertEqual(1, len(roles))
+ self.assertEqual(self.role['id'], roles[0]['id'])
+
+ self.roles_client.check_user_role_existence_on_system(
+ self.user_body['id'], self.role['id'])
+
+ self.roles_client.delete_role_from_user_on_system(
+ self.user_body['id'], self.role['id'])
+
@decorators.idempotent_id('cbf11737-1904-4690-9613-97bcbb3df1c4')
@testtools.skipIf(CONF.identity_feature_enabled.immutable_user_source,
'Skipped because environment has an immutable user '
@@ -197,6 +217,23 @@
self.roles_client.delete_role_from_group_on_domain(
self.domain['id'], self.group_body['id'], self.role['id'])
+ @decorators.idempotent_id('c888fe4f-8018-48db-b959-542225c1b4b6')
+ def test_grant_list_revoke_role_to_group_on_system(self):
+ self.roles_client.create_group_role_on_system(
+ self.group_body['id'], self.role['id'])
+
+ roles = self.roles_client.list_group_roles_on_system(
+ self.group_body['id'])['roles']
+
+ self.assertEqual(1, len(roles))
+ self.assertEqual(self.role['id'], roles[0]['id'])
+
+ self.roles_client.check_role_from_group_on_system_existence(
+ self.group_body['id'], self.role['id'])
+
+ self.roles_client.delete_role_from_group_on_system(
+ self.group_body['id'], self.role['id'])
+
@decorators.idempotent_id('f5654bcc-08c4-4f71-88fe-05d64e06de94')
def test_list_roles(self):
"""Test listing roles"""
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 28299a4..ca72388 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -90,7 +90,7 @@
self.assertEqual('uploading', body['status'])
# import image from staging to backend
self.client.image_import(image['id'], method='glance-direct')
- self.client.wait_for_resource_activation(image['id'])
+ waiters.wait_for_image_imported_to_stores(self.client, image['id'])
@decorators.idempotent_id('f6feb7a4-b04f-4706-a011-206129f83e62')
def test_image_web_download_import(self):
@@ -111,7 +111,7 @@
image_uri = CONF.image.http_image
self.client.image_import(image['id'], method='web-download',
image_uri=image_uri)
- self.client.wait_for_resource_activation(image['id'])
+ waiters.wait_for_image_imported_to_stores(self.client, image['id'])
class MultiStoresImportImagesTest(base.BaseV2ImageTest):
@@ -158,7 +158,7 @@
self.client.stage_image_file(
image['id'],
- six.BytesIO(data_utils.random_bytes(10485760)))
+ six.BytesIO(data_utils.random_bytes()))
# Check image status is 'uploading'
body = self.client.show_image(image['id'])
self.assertEqual(image['id'], body['id'])
@@ -402,7 +402,8 @@
# Validate that the list was fetched sorted accordingly
msg = 'No images were found that met the filter criteria.'
self.assertNotEmpty(images_list, msg)
- sorted_list = [image['size'] for image in images_list]
+ sorted_list = [image['size'] for image in images_list
+ if image['size'] is not None]
msg = 'The list of images was not sorted correctly.'
self.assertEqual(sorted(sorted_list, reverse=desc), sorted_list, msg)
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index d82b6df..2669ff7 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -22,10 +22,10 @@
* ``--regex/-r``: This is a selection regex like what stestr uses. It will run
any tests that match on re.match() with the regex
* ``--smoke/-s``: Run all the tests tagged as smoke
-* ``--black-regex``: It allows to do simple test exclusion via passing a
- rejection/black regexp
+* ``--exclude-regex``: It allows to do simple test exclusion via passing a
+ rejection/exclude regexp
-There are also the ``--blacklist-file`` and ``--whitelist-file`` options that
+There are also the ``--exclude-list`` and ``--include-list`` options that
let you pass a filepath to tempest run with the file format being a line
separated regex, with '#' used to signify the start of a comment on a line.
For example::
@@ -128,8 +128,8 @@
import sys
from cliff import command
+from oslo_log import log
from oslo_serialization import jsonutils as json
-import six
from stestr import commands
from tempest import clients
@@ -139,13 +139,11 @@
from tempest.common import credentials_factory as credentials
from tempest import config
-if six.PY2:
- # Python 2 has not FileNotFoundError exception
- FileNotFoundError = IOError
-
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
+LOG = log.getLogger(__name__)
+
class TempestRun(command.Command):
@@ -167,7 +165,7 @@
# environment variable and fall back to "python", under python3
# if it does not exist. we should set it to the python3 executable
# to deal with this situation better for now.
- if six.PY3 and 'PYTHON' not in os.environ:
+ if 'PYTHON' not in os.environ:
os.environ['PYTHON'] = sys.executable
def _create_stestr_conf(self):
@@ -206,23 +204,71 @@
self._init_state()
regex = self._build_regex(parsed_args)
+
+ # temporary method for parsing deprecated and new stestr options
+ # and showing warning messages in order to make the transition
+ # smoother for all tempest consumers
+ # TODO(kopecmartin) remove this after stestr>=3.1.0 is used
+ # in all supported OpenStack releases
+ def parse_dep(old_o, old_v, new_o, new_v):
+ ret = ''
+ if old_v:
+ LOG.warning("'%s' option is deprecated, use '%s' instead "
+ "which is functionally equivalent. Right now "
+ "Tempest still supports this option for "
+ "backward compatibility, however, it will be "
+ "removed soon.",
+ old_o, new_o)
+ ret = old_v
+ if old_v and new_v:
+ # both options are specified
+ LOG.warning("'%s' and '%s' are specified at the same time, "
+ "'%s' takes precedence over '%s'",
+ new_o, old_o, new_o, old_o)
+ if new_v:
+ ret = new_v
+ return ret
+ ex_regex = parse_dep('--black-regex', parsed_args.black_regex,
+ '--exclude-regex', parsed_args.exclude_regex)
+ ex_list = parse_dep('--blacklist-file', parsed_args.blacklist_file,
+ '--exclude-list', parsed_args.exclude_list)
+ in_list = parse_dep('--whitelist-file', parsed_args.whitelist_file,
+ '--include-list', parsed_args.include_list)
+
return_code = 0
if parsed_args.list_tests:
- return_code = commands.list_command(
- filters=regex, whitelist_file=parsed_args.whitelist_file,
- blacklist_file=parsed_args.blacklist_file,
- black_regex=parsed_args.black_regex)
+ try:
+ return_code = commands.list_command(
+ filters=regex, include_list=in_list,
+ exclude_list=ex_list, exclude_regex=ex_regex)
+ except TypeError:
+ # exclude_list, include_list and exclude_regex are defined only
+ # in stestr >= 3.1.0, this except block catches the case when
+ # tempest is executed with an older stestr
+ return_code = commands.list_command(
+ filters=regex, whitelist_file=in_list,
+ blacklist_file=ex_list, black_regex=ex_regex)
else:
serial = not parsed_args.parallel
- return_code = commands.run_command(
- filters=regex, subunit_out=parsed_args.subunit,
- serial=serial, concurrency=parsed_args.concurrency,
- blacklist_file=parsed_args.blacklist_file,
- whitelist_file=parsed_args.whitelist_file,
- black_regex=parsed_args.black_regex,
- worker_path=parsed_args.worker_file,
- load_list=parsed_args.load_list, combine=parsed_args.combine)
+ params = {
+ 'filters': regex, 'subunit_out': parsed_args.subunit,
+ 'serial': serial, 'concurrency': parsed_args.concurrency,
+ 'worker_path': parsed_args.worker_file,
+ 'load_list': parsed_args.load_list,
+ 'combine': parsed_args.combine
+ }
+ try:
+ return_code = commands.run_command(
+ **params, exclude_list=ex_list,
+ include_list=in_list, exclude_regex=ex_regex)
+ except TypeError:
+ # exclude_list, include_list and exclude_regex are defined only
+ # in stestr >= 3.1.0, this except block catches the case when
+ # tempest is executed with an older stestr
+ return_code = commands.run_command(
+ **params, blacklist_file=ex_list,
+ whitelist_file=in_list, black_regex=ex_regex)
if return_code > 0:
sys.exit(return_code)
return return_code
@@ -276,15 +322,38 @@
help='A normal stestr selection regex used to '
'specify a subset of tests to run')
parser.add_argument('--black-regex', dest='black_regex',
+ help='DEPRECATED: This option is deprecated and '
+ 'will be removed soon, use --exclude-regex '
+ 'which is functionally equivalent. If this '
+ 'is specified at the same time as '
+ '--exclude-regex, this flag will be ignored '
+ 'and --exclude-regex will be used')
+ parser.add_argument('--exclude-regex', dest='exclude_regex',
help='A regex to exclude tests that match it')
parser.add_argument('--whitelist-file', '--whitelist_file',
- help="Path to a whitelist file, this file "
- "contains a separate regex on each "
- "newline.")
+ help='DEPRECATED: This option is deprecated and '
+ 'will be removed soon, use --include-list '
+ 'which is functionally equivalent. If this '
+ 'is specified at the same time as '
+ '--include-list, this flag will be ignored '
+ 'and --include-list will be used')
+ parser.add_argument('--include-list', '--include_list',
+ help="Path to an include file which contains the "
+ "regex for tests to be included in tempest "
+ "run, this file contains a separate regex on "
+ "each newline.")
parser.add_argument('--blacklist-file', '--blacklist_file',
- help='Path to a blacklist file, this file '
- 'contains a separate regex exclude on '
- 'each newline')
+ help='DEPRECATED: This option is deprecated and '
+ 'will be removed soon, use --exclude-list '
+ 'which is functionally equivalent. If this '
+ 'is specified at the same time as '
+ '--exclude-list, this flag will be ignored '
+ 'and --exclude-list will be used')
+ parser.add_argument('--exclude-list', '--exclude_list',
+ help='Path to an exclude file which contains the '
+ 'regex for tests to be excluded in tempest '
+ 'run, this file contains a separate regex on '
+ 'each newline.')
parser.add_argument('--load-list', '--load_list',
help='Path to a non-regex whitelist file, '
'this file contains a separate test '
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index da3a4a9..42f68f1 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -19,7 +19,6 @@
import struct
import textwrap
-import six
from six.moves.urllib import parse as urlparse
from oslo_log import log as logging
@@ -31,11 +30,6 @@
from tempest.lib.common import rest_client
from tempest.lib.common.utils import data_utils
-if six.PY2:
- ord_func = ord
-else:
- ord_func = int
-
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -371,8 +365,8 @@
# frames less than 125 bytes here (for the negotiation) and
# that only the 2nd byte contains the length, and since the
# server doesn't do masking, we can just read the data length
- if ord_func(header[1]) & 127 > 0:
- return self._recv(ord_func(header[1]) & 127)
+ if int(header[1]) & 127 > 0:
+ return self._recv(int(header[1]) & 127)
def send_frame(self, data):
"""Wrapper for sending data to add in the WebSocket frame format."""
@@ -389,7 +383,7 @@
frame_bytes.append(mask[i])
# Mask each of the actual data bytes that we are going to send
for i in range(len(data)):
- frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
+ frame_bytes.append(int(data[i]) ^ mask[i % 4])
# Convert our integer list to a binary array of bytes
frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
self._socket.sendall(frame_bytes)
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index c6e5dcb..2d486a7 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -245,6 +245,9 @@
if identity_version == 'v3':
conf_attributes.append('domain_name')
+ conf_attributes.append('user_domain_name')
+ conf_attributes.append('project_domain_name')
+ conf_attributes.append('system')
# Read the parts of credentials from config
params = config.service_client_config()
for attr in conf_attributes:
@@ -284,7 +287,8 @@
if identity_version == 'v3':
domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
if 'domain' in x)
- if not domain_fields.intersection(kwargs.keys()):
+ if (not params.get('system') and
+ not domain_fields.intersection(kwargs.keys())):
domain_name = CONF.auth.default_credentials_domain_name
# NOTE(andreaf) Setting domain_name implicitly sets user and
# project domain names, if they are None
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 914acf7..38881ee 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -59,6 +59,7 @@
# So we should set this True here.
'identity': True,
'object_storage': CONF.service_available.swift,
+ 'dashboard': CONF.service_available.horizon,
}
return service_list
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 17796df..eaac05e 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -193,26 +193,34 @@
raise lib_exc.TimeoutException(message)
-def wait_for_image_imported_to_stores(client, image_id, stores):
+def wait_for_image_imported_to_stores(client, image_id, stores=None):
"""Waits for an image to be imported to all requested stores.
+ Short circuits to fail if the serer reports failure of any store.
+ If stores is None, just wait for status==active.
+
The client should also have build_interval and build_timeout attributes.
"""
+ exc_cls = lib_exc.TimeoutException
start = int(time.time())
while int(time.time()) - start < client.build_timeout:
image = client.show_image(image_id)
- if image['status'] == 'active' and image['stores'] == stores:
+ if image['status'] == 'active' and (stores is None or
+ image['stores'] == stores):
return
+ if image.get('os_glance_failed_import'):
+ exc_cls = lib_exc.OtherRestClientException
+ break
time.sleep(client.build_interval)
- message = ('Image %(image_id)s failed to import '
- 'on stores: %s' % str(image['os_glance_failed_import']))
+ message = ('Image %s failed to import on stores: %s' %
+ (image_id, str(image.get('os_glance_failed_import'))))
caller = test_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
- raise lib_exc.TimeoutException(message)
+ raise exc_cls(message)
def wait_for_image_copied_to_stores(client, image_id):
@@ -238,8 +246,8 @@
time.sleep(client.build_interval)
- message = ('Image %(image_id)s failed to finish the copy operation '
- 'on stores: %s' % str(store_left))
+ message = ('Image %s failed to finish the copy operation '
+ 'on stores: %s' % (image_id, str(store_left)))
caller = test_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
@@ -317,6 +325,32 @@
'seconds', attachment_id, volume_id, time.time() - start)
+def wait_for_volume_attachment_remove_from_server(
+ client, server_id, volume_id):
+ """Waits for a volume to be removed from a given server.
+
+ This waiter checks the compute API if the volume attachment is removed.
+ """
+ start = int(time.time())
+ volumes = client.list_volume_attachments(server_id)['volumeAttachments']
+
+ while any(volume for volume in volumes if volume['volumeId'] == volume_id):
+ time.sleep(client.build_interval)
+
+ timed_out = int(time.time()) - start >= client.build_timeout
+ if timed_out:
+ message = ('Volume %s failed to detach from server %s within '
+ 'the required time (%s s) from the compute API '
+ 'perspective' %
+ (volume_id, server_id, client.build_timeout))
+ raise lib_exc.TimeoutException(message)
+
+ volumes = client.list_volume_attachments(server_id)[
+ 'volumeAttachments']
+
+ return volumes
+
+
def wait_for_volume_migration(client, volume_id, new_host):
"""Waits for a Volume to move to a new host."""
body = client.show_volume(volume_id)['volume']
diff --git a/tempest/config.py b/tempest/config.py
index 26a7fab..31d9b1b 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -92,7 +92,24 @@
cfg.StrOpt('admin_domain_name',
default='Default',
help="Admin domain name for authentication (Keystone V3). "
- "The same domain applies to user and project"),
+ "The same domain applies to user and project if "
+ "admin_user_domain_name and admin_project_domain_name "
+ "are not specified"),
+ cfg.StrOpt('admin_user_domain_name',
+ help="Domain name that contains the admin user (Keystone V3). "
+ "May be different from admin_project_domain_name and "
+ "admin_domain_name"),
+ cfg.StrOpt('admin_project_domain_name',
+ help="Domain name that contains the project given by "
+ "admin_project_name (Keystone V3). May be different from "
+ "admin_user_domain_name and admin_domain_name"),
+ cfg.StrOpt('admin_system',
+ default=None,
+ help="The system scope on which an admin user has an admin "
+ "role assignment, if any. Valid values are 'all' or None. "
+ "This must be set to 'all' if using the "
+ "[oslo_policy]/enforce_scope=true option for the "
+ "identity service."),
]
identity_group = cfg.OptGroup(name='identity',
@@ -452,6 +469,10 @@
cfg.BoolOpt('shelve',
default=True,
help="Does the test environment support shelving/unshelving?"),
+ cfg.BoolOpt('shelve_migrate',
+ default=False,
+ help="Does the test environment support "
+ "cold migration of unshelved server?"),
cfg.BoolOpt('suspend',
default=True,
help="Does the test environment support suspend/resume?"),
@@ -757,11 +778,13 @@
deprecated_reason="This config option is no longer "
"used anywhere, so it can be removed."),
cfg.StrOpt('port_vnic_type',
- choices=[None, 'normal', 'direct', 'macvtap'],
+ choices=[None, 'normal', 'direct', 'macvtap', 'direct-physical',
+ 'baremetal', 'virtio-forwarder'],
help="vnic_type to use when launching instances"
" with pre-configured ports."
" Supported ports are:"
- " ['normal','direct','macvtap']"),
+ " ['normal', 'direct', 'macvtap', 'direct-physical', "
+ "'baremetal', 'virtio-forwarder']"),
cfg.Opt('port_profile',
type=ProfileType,
default={},
@@ -783,36 +806,37 @@
NetworkFeaturesGroup = [
cfg.BoolOpt('ipv6',
default=True,
- help="Allow the execution of IPv6 tests"),
+ help="Allow the execution of IPv6 tests."),
cfg.ListOpt('api_extensions',
default=['all'],
help="A list of enabled network extensions with a special "
"entry all which indicates every extension is enabled. "
"Empty list indicates all extensions are disabled. "
- "To get the list of extensions run: 'neutron ext-list'"),
+ "To get the list of extensions run: "
+ "'openstack extension list --network'"),
cfg.ListOpt('available_features',
default=['all'],
help="A list of available network features with a special "
"entry all that indicates every feature is available. "
- "Empty list indicates all features are disabled."
+ "Empty list indicates all features are disabled. "
"This list can contain features that are not "
- "discoverable through API."),
+ "discoverable through the API."),
cfg.BoolOpt('ipv6_subnet_attributes',
default=False,
help="Allow the execution of IPv6 subnet tests that use "
"the extended IPv6 attributes ipv6_ra_mode "
- "and ipv6_address_mode"
+ "and ipv6_address_mode."
),
cfg.BoolOpt('port_admin_state_change',
default=True,
- help="Does the test environment support changing"
- " port admin state"),
+ help="Does the test environment support changing "
+ "port admin state?"),
cfg.BoolOpt('port_security',
default=False,
help="Does the test environment support port security?"),
cfg.BoolOpt('floating_ips',
default=True,
- help='Does the test environment support floating_ips'),
+ help='Does the test environment support floating_ips?'),
cfg.StrOpt('qos_placement_physnet', default=None,
help='Name of the physnet for placement based minimum '
'bandwidth allocation.'),
@@ -821,6 +845,18 @@
'This value will be increased in case of conflict.')
]
+dashboard_group = cfg.OptGroup(name="dashboard",
+ title="Dashboard options")
+
+DashboardGroup = [
+ cfg.StrOpt('dashboard_url',
+ default='http://localhost/',
+ help="Where the dashboard can be found"),
+ cfg.BoolOpt('disable_ssl_certificate_validation',
+ default=False,
+ help="Set to True if using self-signed SSL certificates."),
+]
+
validation_group = cfg.OptGroup(name='validation',
title='SSH Validation options')
@@ -1166,6 +1202,42 @@
cfg.BoolOpt('nova',
default=True,
help="Whether or not nova is expected to be available"),
+ cfg.BoolOpt('horizon',
+ default=True,
+ help="Whether or not horizon is expected to be available"),
+]
+
+enforce_scope_group = cfg.OptGroup(name="enforce_scope",
+ title="OpenStack Services with "
+ "enforce scope")
+
+
+EnforceScopeGroup = [
+ cfg.BoolOpt('nova',
+ default=False,
+ help='Does the compute service API policies enforce scope? '
+ 'This configuration value should be same as '
+ 'nova.conf: [oslo_policy].enforce_scope option.'),
+ cfg.BoolOpt('neutron',
+ default=False,
+ help='Does the network service API policies enforce scope? '
+ 'This configuration value should be same as '
+ 'neutron.conf: [oslo_policy].enforce_scope option.'),
+ cfg.BoolOpt('glance',
+ default=False,
+ help='Does the Image service API policies enforce scope? '
+ 'This configuration value should be same as '
+ 'glance.conf: [oslo_policy].enforce_scope option.'),
+ cfg.BoolOpt('cinder',
+ default=False,
+ help='Does the Volume service API policies enforce scope? '
+ 'This configuration value should be same as '
+ 'cinder.conf: [oslo_policy].enforce_scope option.'),
+ cfg.BoolOpt('keystone',
+ default=False,
+ help='Does the Identity service API policies enforce scope? '
+ 'This configuration value should be same as '
+ 'keystone.conf: [oslo_policy].enforce_scope option.'),
]
debug_group = cfg.OptGroup(name="debug",
@@ -1229,6 +1301,7 @@
(image_feature_group, ImageFeaturesGroup),
(network_group, NetworkGroup),
(network_feature_group, NetworkFeaturesGroup),
+ (dashboard_group, DashboardGroup),
(validation_group, ValidationGroup),
(volume_group, VolumeGroup),
(volume_feature_group, VolumeFeaturesGroup),
@@ -1236,6 +1309,7 @@
(object_storage_feature_group, ObjectStoreFeaturesGroup),
(scenario_group, ScenarioGroup),
(service_available_group, ServiceAvailableGroup),
+ (enforce_scope_group, EnforceScopeGroup),
(debug_group, DebugGroup),
(placement_group, PlacementGroup),
(profiler_group, ProfilerGroup),
@@ -1296,6 +1370,7 @@
self.image_feature_enabled = _CONF['image-feature-enabled']
self.network = _CONF.network
self.network_feature_enabled = _CONF['network-feature-enabled']
+ self.dashboard = _CONF.dashboard
self.validation = _CONF.validation
self.volume = _CONF.volume
self.volume_feature_enabled = _CONF['volume-feature-enabled']
@@ -1304,6 +1379,7 @@
'object-storage-feature-enabled']
self.scenario = _CONF.scenario
self.service_available = _CONF.service_available
+ self.enforce_scope = _CONF.enforce_scope
self.debug = _CONF.debug
logging.tempest_set_log_file('tempest.log')
# Setting attributes for plugins
diff --git a/tempest/lib/api_schema/response/compute/v2_70/interfaces.py b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
new file mode 100644
index 0000000..3160b92
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_70/interfaces.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import interfaces
+
+# ****** Schemas changed in microversion 2.70 *****************
+#
+# 1. add optional field 'tag' in the Response body of the following APIs:
+# - GET /servers/{server_id}/os-interface
+# - POST /servers/{server_id}/os-interface
+# - GET /servers/{server_id}/os-interface/{port_id}
+
+get_create_interfaces = copy.deepcopy(interfaces.get_create_interfaces)
+get_create_interfaces['response_body']['properties']['interfaceAttachment'][
+ 'properties'].update({'tag': {'type': ['string', 'null']}})
+
+list_interfaces = copy.deepcopy(interfaces.list_interfaces)
+list_interfaces['response_body']['properties']['interfaceAttachments'][
+ 'items']['properties'].update({'tag': {'type': ['string', 'null']}})
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.1 ***
+delete_interface = copy.deepcopy(interfaces.delete_interface)
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 7c279ab..9f8c7c5 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -428,7 +428,7 @@
class KeystoneV3AuthProvider(KeystoneAuthProvider):
"""Provides authentication based on the Identity V3 API"""
- SCOPES = set(['project', 'domain', 'unscoped', None])
+ SCOPES = set(['system', 'project', 'domain', 'unscoped', None])
def _auth_client(self, auth_url):
return json_v3id.V3TokenClient(
@@ -441,8 +441,8 @@
Fields available in Credentials are passed to the token request,
depending on the value of scope. Valid values for scope are: "project",
- "domain". Any other string (e.g. "unscoped") or None will lead to an
- unscoped token request.
+ "domain", or "system". Any other string (e.g. "unscoped") or None will
+ lead to an unscoped token request.
"""
auth_params = dict(
@@ -465,12 +465,16 @@
domain_id=self.credentials.domain_id,
domain_name=self.credentials.domain_name)
+ if self.scope == 'system':
+ auth_params.update(system='all')
+
return auth_params
def _fill_credentials(self, auth_data_body):
- # project or domain, depending on the scope
+ # project, domain, or system depending on the scope
project = auth_data_body.get('project', None)
domain = auth_data_body.get('domain', None)
+ system = auth_data_body.get('system', None)
# user is always there
user = auth_data_body['user']
# Set project fields
@@ -490,6 +494,9 @@
self.credentials.domain_id = domain['id']
if self.credentials.domain_name is None:
self.credentials.domain_name = domain['name']
+ # Set system scope
+ if system is not None:
+ self.credentials.system = 'all'
# Set user fields
if self.credentials.username is None:
self.credentials.username = user['name']
@@ -677,7 +684,8 @@
raise exceptions.InvalidCredentials(msg)
for key in attr:
if key in self.ATTRIBUTES:
- setattr(self, key, attr[key])
+ if attr[key] is not None:
+ setattr(self, key, attr[key])
else:
msg = '%s is not a valid attr for %s' % (key, self.__class__)
raise exceptions.InvalidCredentials(msg)
@@ -779,7 +787,7 @@
ATTRIBUTES = ['domain_id', 'domain_name', 'password', 'username',
'project_domain_id', 'project_domain_name', 'project_id',
'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
- 'user_domain_name', 'user_id']
+ 'user_domain_name', 'user_id', 'system']
COLLISIONS = [('project_name', 'tenant_name'), ('project_id', 'tenant_id')]
def __setattr__(self, key, value):
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index d8c776b..c661d21 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -18,7 +18,6 @@
import subprocess
from oslo_log import log as logging
-import six
from tempest.lib import base
import tempest.lib.cli.output_parser
@@ -55,8 +54,6 @@
flags, action, params])
cmd = cmd.strip()
LOG.info("running: '%s'", cmd)
- if six.PY2:
- cmd = cmd.encode('utf-8')
cmd = shlex.split(cmd)
stdout = subprocess.PIPE
stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
@@ -67,10 +64,7 @@
cmd,
result,
result_err)
- if six.PY2:
- return result
- else:
- return os.fsdecode(result)
+ return os.fsdecode(result)
class CLIClient(object):
diff --git a/tempest/lib/common/cred_client.py b/tempest/lib/common/cred_client.py
index a81f53c..e16a565 100644
--- a/tempest/lib/common/cred_client.py
+++ b/tempest/lib/common/cred_client.py
@@ -39,11 +39,15 @@
self.projects_client = projects_client
self.roles_client = roles_client
- def create_user(self, username, password, project, email):
+ def create_user(self, username, password, project=None, email=None):
params = {'name': username,
- 'password': password,
- self.project_id_param: project['id'],
- 'email': email}
+ 'password': password}
+ # with keystone v3, a default project is not required
+ if project:
+ params[self.project_id_param] = project['id']
+ # email is not a first-class attribute of a user
+ if email:
+ params['email'] = email
user = self.users_client.create_user(**params)
if 'user' in user:
user = user['user']
@@ -83,12 +87,15 @@
role['id'], project['id'], user['id'])
@abc.abstractmethod
- def get_credentials(self, user, project, password):
+ def get_credentials(
+ self, user, project, password, domain=None, system=None):
"""Produces a Credentials object from the details provided
:param user: a user dict
- :param project: a project dict
+ :param project: a project dict or None if using domain or system scope
:param password: the password as a string
+ :param domain: a domain dict
+ :param system: a system dict
:return: a Credentials object with all the available credential details
"""
pass
@@ -116,7 +123,8 @@
def delete_project(self, project_id):
self.projects_client.delete_tenant(project_id)
- def get_credentials(self, user, project, password):
+ def get_credentials(
+ self, user, project, password, domain=None, system=None):
# User and project already include both ID and name here,
# so there's no need to use the fill_in mode
return auth.get_credentials(
@@ -156,23 +164,46 @@
def delete_project(self, project_id):
self.projects_client.delete_project(project_id)
- def get_credentials(self, user, project, password):
+ def create_domain(self, name, description):
+ domain = self.domains_client.create_domain(
+ name=name, description=description)['domain']
+ return domain
+
+ def delete_domain(self, domain_id):
+ self.domains_client.update_domain(domain_id, enabled=False)
+ self.domains_client.delete_domain(domain_id)
+
+ def get_credentials(
+ self, user, project, password, domain=None, system=None):
# User, project and domain already include both ID and name here,
# so there's no need to use the fill_in mode.
# NOTE(andreaf) We need to set all fields in the returned credentials.
# Scope is then used to pick only those relevant for the type of
# token needed by each service client.
+ if project:
+ project_name = project['name']
+ project_id = project['id']
+ else:
+ project_name = None
+ project_id = None
+ if domain:
+ domain_name = domain['name']
+ domain_id = domain['id']
+ else:
+ domain_name = self.creds_domain['name']
+ domain_id = self.creds_domain['id']
return auth.get_credentials(
auth_url=None,
fill_in=False,
identity_version='v3',
username=user['name'], user_id=user['id'],
- project_name=project['name'], project_id=project['id'],
+ project_name=project_name, project_id=project_id,
password=password,
project_domain_id=self.creds_domain['id'],
project_domain_name=self.creds_domain['name'],
- domain_id=self.creds_domain['id'],
- domain_name=self.creds_domain['name'])
+ domain_id=domain_id,
+ domain_name=domain_name,
+ system=system)
def assign_user_role_on_domain(self, user, role_name, domain=None):
"""Assign the specified role on a domain
@@ -197,6 +228,23 @@
LOG.debug("Role %s already assigned on domain %s for user %s",
role['id'], domain['id'], user['id'])
+ def assign_user_role_on_system(self, user, role_name):
+ """Assign the specified role on the system
+
+ :param user: a user dict
+ :param role_name: name of the role to be assigned
+ """
+ role = self._check_role_exists(role_name)
+ if not role:
+ msg = 'No "%s" role found' % role_name
+ raise lib_exc.NotFound(msg)
+ try:
+ self.roles_client.create_user_role_on_system(
+ user['id'], role['id'])
+ except lib_exc.Conflict:
+ LOG.debug("Role %s already assigned on the system for user %s",
+ role['id'], user['id'])
+
def get_creds_client(identity_client,
projects_client,
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 8b82391..ecbbe8f 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -142,7 +142,14 @@
else:
# We use a dedicated client manager for identity client in case we
# need a different token scope for them.
- scope = 'domain' if self.identity_admin_domain_scope else 'project'
+ if self.default_admin_creds.system:
+ scope = 'system'
+ elif (self.identity_admin_domain_scope and
+ (self.default_admin_creds.domain_id or
+ self.default_admin_creds.domain_name)):
+ scope = 'domain'
+ else:
+ scope = 'project'
identity_os = clients.ServiceClients(self.default_admin_creds,
self.identity_uri,
scope=scope)
@@ -157,62 +164,98 @@
os.network.PortsClient(),
os.network.SecurityGroupsClient())
- def _create_creds(self, admin=False, roles=None):
+ def _create_creds(self, admin=False, roles=None, scope='project'):
"""Create credentials with random name.
- Creates project and user. When admin flag is True create user
- with admin role. Assign user with additional roles (for example
- _member_) and roles requested by caller.
+ Creates user and role assignments on a project, domain, or system. When
+ the admin flag is True, creates user with the admin role on the
+ resource. If roles are provided, assigns those roles on the resource.
+ Otherwise, assigns the user the 'member' role on the resource.
:param admin: Flag if to assign to the user admin role
:type admin: bool
:param roles: Roles to assign for the user
:type roles: list
+ :param str scope: The scope for the role assignment, may be one of
+ 'project', 'domain', or 'system'.
:return: Readonly Credentials with network resources
+ :raises: Exception if scope is invalid
"""
+ if not roles:
+ roles = []
root = self.name
- project_name = data_utils.rand_name(root, prefix=self.resource_prefix)
- project_desc = project_name + "-desc"
- project = self.creds_client.create_project(
- name=project_name, description=project_desc)
+ cred_params = {
+ 'project': None,
+ 'domain': None,
+ 'system': None
+ }
+ if scope == 'project':
+ project_name = data_utils.rand_name(
+ root, prefix=self.resource_prefix)
+ project_desc = project_name + '-desc'
+ project = self.creds_client.create_project(
+ name=project_name, description=project_desc)
- # NOTE(andreaf) User and project can be distinguished from the context,
- # having the same ID in both makes it easier to match them and debug.
- username = project_name
- user_password = data_utils.rand_password()
- email = data_utils.rand_name(
- root, prefix=self.resource_prefix) + "@example.com"
- user = self.creds_client.create_user(
- username, user_password, project, email)
- role_assigned = False
+ # NOTE(andreaf) User and project can be distinguished from the
+ # context, having the same ID in both makes it easier to match them
+ # and debug.
+ username = project_name + '-project'
+ cred_params['project'] = project
+ elif scope == 'domain':
+ domain_name = data_utils.rand_name(
+ root, prefix=self.resource_prefix)
+ domain_desc = domain_name + '-desc'
+ domain = self.creds_client.create_domain(
+ name=domain_name, description=domain_desc)
+ username = domain_name + '-domain'
+ cred_params['domain'] = domain
+ elif scope == 'system':
+ prefix = data_utils.rand_name(root, prefix=self.resource_prefix)
+ username = prefix + '-system'
+ cred_params['system'] = 'all'
+ else:
+ raise lib_exc.InvalidScopeType(scope=scope)
if admin:
- self.creds_client.assign_user_role(user, project, self.admin_role)
- role_assigned = True
+ username += '-admin'
+ elif roles and len(roles) == 1:
+ username += '-' + roles[0]
+ user_password = data_utils.rand_password()
+ cred_params['password'] = user_password
+ user = self.creds_client.create_user(
+ username, user_password)
+ cred_params['user'] = user
+ roles_to_assign = [r for r in roles]
+ if admin:
+ roles_to_assign.append(self.admin_role)
+ self.creds_client.assign_user_role(
+ user, project, self.identity_admin_role)
if (self.identity_version == 'v3' and
self.identity_admin_domain_scope):
self.creds_client.assign_user_role_on_domain(
user, self.identity_admin_role)
# Add roles specified in config file
- for conf_role in self.extra_roles:
- self.creds_client.assign_user_role(user, project, conf_role)
- role_assigned = True
- # Add roles requested by caller
- if roles:
- for role in roles:
- self.creds_client.assign_user_role(user, project, role)
- role_assigned = True
+ roles_to_assign.extend(self.extra_roles)
+ # If there are still no roles, default to 'member'
# NOTE(mtreinish) For a user to have access to a project with v3 auth
# it must beassigned a role on the project. So we need to ensure that
# our newly created user has a role on the newly created project.
- if self.identity_version == 'v3' and not role_assigned:
+ if not roles_to_assign and self.identity_version == 'v3':
+ roles_to_assign = ['member']
try:
self.creds_client.create_user_role('member')
except lib_exc.Conflict:
LOG.warning('member role already exists, ignoring conflict.')
- self.creds_client.assign_user_role(user, project, 'member')
+ for role in roles_to_assign:
+ if scope == 'project':
+ self.creds_client.assign_user_role(user, project, role)
+ elif scope == 'domain':
+ self.creds_client.assign_user_role_on_domain(
+ user, role, domain)
+ elif scope == 'system':
+ self.creds_client.assign_user_role_on_system(user, role)
- creds = self.creds_client.get_credentials(user, project, user_password)
+ creds = self.creds_client.get_credentials(**cred_params)
return cred_provider.TestResources(creds)
def _create_network_resources(self, tenant_id):
@@ -327,16 +370,29 @@
self.routers_admin_client.add_router_interface(router_id,
subnet_id=subnet_id)
- def get_credentials(self, credential_type):
- if self._creds.get(str(credential_type)):
+ def get_credentials(self, credential_type, scope=None):
+ if not scope and self._creds.get(str(credential_type)):
credentials = self._creds[str(credential_type)]
+ elif scope and self._creds.get("%s_%s" % (scope, credential_type[0])):
+ credentials = self._creds["%s_%s" % (scope, credential_type[0])]
else:
- if credential_type in ['primary', 'alt', 'admin']:
+ if scope:
+ if credential_type == 'admin':
+ credentials = self._create_creds(
+ admin=True, scope=scope)
+ else:
+ credentials = self._create_creds(
+ roles=credential_type, scope=scope)
+ elif credential_type in ['primary', 'alt', 'admin']:
is_admin = (credential_type == 'admin')
credentials = self._create_creds(admin=is_admin)
else:
credentials = self._create_creds(roles=credential_type)
- self._creds[str(credential_type)] = credentials
+ if scope:
+ self._creds["%s_%s" %
+ (scope, credential_type[0])] = credentials
+ else:
+ self._creds[str(credential_type)] = credentials
# Maintained until tests are ported
LOG.info("Acquired dynamic creds:\n"
" credentials: %s", credentials)
@@ -358,6 +414,33 @@
def get_alt_creds(self):
return self.get_credentials('alt')
+ def get_system_admin_creds(self):
+ return self.get_credentials(['admin'], scope='system')
+
+ def get_system_member_creds(self):
+ return self.get_credentials(['member'], scope='system')
+
+ def get_system_reader_creds(self):
+ return self.get_credentials(['reader'], scope='system')
+
+ def get_domain_admin_creds(self):
+ return self.get_credentials(['admin'], scope='domain')
+
+ def get_domain_member_creds(self):
+ return self.get_credentials(['member'], scope='domain')
+
+ def get_domain_reader_creds(self):
+ return self.get_credentials(['reader'], scope='domain')
+
+ def get_project_admin_creds(self):
+ return self.get_credentials(['admin'], scope='project')
+
+ def get_project_member_creds(self):
+ return self.get_credentials(['member'], scope='project')
+
+ def get_project_reader_creds(self):
+ return self.get_credentials(['reader'], scope='project')
+
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
# The roles list as a str will become the index as the dict key for
@@ -465,6 +548,16 @@
except lib_exc.NotFound:
LOG.warning("tenant with name: %s not found for delete",
creds.tenant_name)
+
+ # if cred is domain scoped, delete ephemeral domain
+ # do not delete default domain
+ if (hasattr(creds, 'domain_id') and
+ creds.domain_id != creds.project_domain_id):
+ try:
+ self.creds_client.delete_domain(creds.domain_id)
+ except lib_exc.NotFound:
+ LOG.warning("domain with name: %s not found for delete",
+ creds.domain_name)
self._creds = {}
def is_multi_user(self):
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index 641d727..8325f44 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -104,15 +104,24 @@
return hash_dict
@classmethod
+ def _append_scoped_role(cls, scope, role, account_hash, hash_dict):
+ key = "%s_%s" % (scope, role)
+ hash_dict['scoped_roles'].setdefault(key, [])
+ hash_dict['scoped_roles'][key].append(account_hash)
+ return hash_dict
+
+ @classmethod
def get_hash_dict(cls, accounts, admin_role,
object_storage_operator_role=None,
object_storage_reseller_admin_role=None):
- hash_dict = {'roles': {}, 'creds': {}, 'networks': {}}
+ hash_dict = {'roles': {}, 'creds': {}, 'networks': {},
+ 'scoped_roles': {}}
# Loop over the accounts read from the yaml file
for account in accounts:
roles = []
types = []
+ scope = None
resources = []
if 'roles' in account:
roles = account.pop('roles')
@@ -120,6 +129,12 @@
types = account.pop('types')
if 'resources' in account:
resources = account.pop('resources')
+ if 'project_name' in account:
+ scope = 'project'
+ elif 'domain_name' in account:
+ scope = 'domain'
+ elif 'system' in account:
+ scope = 'system'
temp_hash = hashlib.md5()
account_for_hash = dict((k, v) for (k, v) in account.items()
if k in cls.HASH_CRED_FIELDS)
@@ -129,6 +144,9 @@
for role in roles:
hash_dict = cls._append_role(role, temp_hash_key,
hash_dict)
+ if scope:
+ hash_dict = cls._append_scoped_role(
+ scope, role, temp_hash_key, hash_dict)
# If types are set for the account append the matching role
# subdict with the hash
for type in types:
@@ -201,17 +219,25 @@
'the credentials for this allocation request' % ','.join(names))
raise lib_exc.InvalidCredentials(msg)
- def _get_match_hash_list(self, roles=None):
+ def _get_match_hash_list(self, roles=None, scope=None):
hashes = []
if roles:
# Loop over all the creds for each role in the subdict and generate
# a list of cred lists for each role
for role in roles:
- temp_hashes = self.hash_dict['roles'].get(role, None)
- if not temp_hashes:
- raise lib_exc.InvalidCredentials(
- "No credentials with role: %s specified in the "
- "accounts ""file" % role)
+ if scope:
+ key = "%s_%s" % (scope, role)
+ temp_hashes = self.hash_dict['scoped_roles'].get(key)
+ if not temp_hashes:
+ raise lib_exc.InvalidCredentials(
+ "No credentials matching role: %s, scope: %s "
+ "specified in the accounts file" % (role, scope))
+ else:
+ temp_hashes = self.hash_dict['roles'].get(role, None)
+ if not temp_hashes:
+ raise lib_exc.InvalidCredentials(
+ "No credentials with role: %s specified in the "
+ "accounts file" % role)
hashes.append(temp_hashes)
# Take the list of lists and do a boolean and between each list to
# find the creds which fall under all the specified roles
@@ -239,8 +265,8 @@
temp_creds.pop('password')
return temp_creds
- def _get_creds(self, roles=None):
- useable_hashes = self._get_match_hash_list(roles)
+ def _get_creds(self, roles=None, scope=None):
+ useable_hashes = self._get_match_hash_list(roles, scope)
if not useable_hashes:
msg = 'No users configured for type/roles %s' % roles
raise lib_exc.InvalidCredentials(msg)
@@ -296,6 +322,69 @@
self._creds['alt'] = net_creds
return net_creds
+ def get_system_admin_creds(self):
+ if self._creds.get('system_admin'):
+ return self._creds.get('system_admin')
+ system_admin = self._get_creds(['admin'], scope='system')
+ self._creds['system_admin'] = system_admin
+ return system_admin
+
+ def get_system_member_creds(self):
+ if self._creds.get('system_member'):
+ return self._creds.get('system_member')
+ system_member = self._get_creds(['member'], scope='system')
+ self._creds['system_member'] = system_member
+ return system_member
+
+ def get_system_reader_creds(self):
+ if self._creds.get('system_reader'):
+ return self._creds.get('system_reader')
+ system_reader = self._get_creds(['reader'], scope='system')
+ self._creds['system_reader'] = system_reader
+ return system_reader
+
+ def get_domain_admin_creds(self):
+ if self._creds.get('domain_admin'):
+ return self._creds.get('domain_admin')
+ domain_admin = self._get_creds(['admin'], scope='domain')
+ self._creds['domain_admin'] = domain_admin
+ return domain_admin
+
+ def get_domain_member_creds(self):
+ if self._creds.get('domain_member'):
+ return self._creds.get('domain_member')
+ domain_member = self._get_creds(['member'], scope='domain')
+ self._creds['domain_member'] = domain_member
+ return domain_member
+
+ def get_domain_reader_creds(self):
+ if self._creds.get('domain_reader'):
+ return self._creds.get('domain_reader')
+ domain_reader = self._get_creds(['reader'], scope='domain')
+ self._creds['domain_reader'] = domain_reader
+ return domain_reader
+
+ def get_project_admin_creds(self):
+ if self._creds.get('project_admin'):
+ return self._creds.get('project_admin')
+ project_admin = self._get_creds(['admin'], scope='project')
+ self._creds['project_admin'] = project_admin
+ return project_admin
+
+ def get_project_member_creds(self):
+ if self._creds.get('project_member'):
+ return self._creds.get('project_member')
+ project_member = self._get_creds(['member'], scope='project')
+ self._creds['project_member'] = project_member
+ return project_member
+
+ def get_project_reader_creds(self):
+ if self._creds.get('project_reader'):
+ return self._creds.get('project_reader')
+ project_reader = self._get_creds(['reader'], scope='project')
+ self._creds['project_reader'] = project_reader
+ return project_reader
+
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
exist_creds = self._creds.get(six.text_type(roles).encode(
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index b47b511..a987e03 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -104,16 +104,18 @@
'location', 'proxy-authenticate',
'retry-after', 'server',
'vary', 'www-authenticate'))
- dscv = disable_ssl_certificate_validation
+ self.dscv = disable_ssl_certificate_validation
if proxy_url:
self.http_obj = http.ClosingProxyHttp(
proxy_url,
- disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+ disable_ssl_certificate_validation=self.dscv,
+ ca_certs=ca_certs,
timeout=http_timeout, follow_redirects=follow_redirects)
else:
self.http_obj = http.ClosingHttp(
- disable_ssl_certificate_validation=dscv, ca_certs=ca_certs,
+ disable_ssl_certificate_validation=self.dscv,
+ ca_certs=ca_certs,
timeout=http_timeout, follow_redirects=follow_redirects)
def get_headers(self, accept_type=None, send_type=None):
diff --git a/tempest/lib/common/thread.py b/tempest/lib/common/thread.py
index b47d40d..ef0ec73 100644
--- a/tempest/lib/common/thread.py
+++ b/tempest/lib/common/thread.py
@@ -13,13 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
-if six.PY2:
- # module thread is removed in Python 3
- from thread import get_ident # noqa: H237,F401
-
-else:
- # On Python3 thread module has been deprecated and get_ident has been moved
- # to threading module
- from threading import get_ident # noqa: F401
+# On Python3 thread module has been deprecated and get_ident has been moved
+# to threading module
+from threading import get_ident # noqa: F401
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index 44b55eb..b6671b5 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -169,6 +169,8 @@
:return: size randomly bytes
:rtype: string
"""
+ if size > 1 << 20:
+ raise RuntimeError('Size should be less than 1MiB')
return b''.join([six.int2byte(random.randint(0, 255))
for i in range(size)])
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index ebe2d61..25ff473 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -72,19 +72,13 @@
def decorator(f):
@functools.wraps(f)
def wrapper(*func_args, **func_kwargs):
- skip = False
- msg = ''
- if "condition" in kwargs:
- if kwargs["condition"] is True:
- skip = True
- else:
- skip = True
- if "bug" in kwargs and skip is True:
- bug = kwargs['bug']
+ condition = kwargs.get('condition', True)
+ bug = kwargs.get('bug', None)
+ if bug and condition:
bug_type = kwargs.get('bug_type', 'launchpad')
bug_url = _get_bug_url(bug, bug_type)
- msg = "Skipped until bug: %s is resolved." % bug_url
- raise testtools.TestCase.skipException(msg)
+ raise testtools.TestCase.skipException(
+ "Skipped until bug: %s is resolved." % bug_url)
return f(*func_args, **func_kwargs)
return wrapper
return decorator
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index 84b7ee6..abe68d2 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -294,3 +294,7 @@
class ConsistencyGroupSnapshotException(TempestException):
message = ("Consistency group snapshot %(cgsnapshot_id)s failed and is "
"in ERROR status")
+
+
+class InvalidScopeType(TempestException):
+ message = "Invalid scope %(scope)s"
diff --git a/tempest/lib/services/clients.py b/tempest/lib/services/clients.py
index 90debd9..d328956 100644
--- a/tempest/lib/services/clients.py
+++ b/tempest/lib/services/clients.py
@@ -257,7 +257,7 @@
# class should only be used by tests hosted in Tempest.
@removals.removed_kwarg('client_parameters')
- def __init__(self, credentials, identity_uri, region=None, scope='project',
+ def __init__(self, credentials, identity_uri, region=None, scope=None,
disable_ssl_certificate_validation=True, ca_certs=None,
trace_requests='', client_parameters=None, proxy_url=None):
"""Service Clients provider
@@ -348,6 +348,14 @@
self.ca_certs = ca_certs
self.trace_requests = trace_requests
self.proxy_url = proxy_url
+ if self.credentials.project_id or self.credentials.project_name:
+ scope = 'project'
+ elif self.credentials.system:
+ scope = 'system'
+ elif self.credentials.domain_id or self.credentials.domain_name:
+ scope = 'domain'
+ else:
+ scope = 'project'
# Creates an auth provider for the credentials
self.auth_provider = auth_provider_class(
self.credentials, self.identity_uri, scope=scope,
diff --git a/tempest/lib/services/compute/interfaces_client.py b/tempest/lib/services/compute/interfaces_client.py
index e1c02fa..9244a4a 100644
--- a/tempest/lib/services/compute/interfaces_client.py
+++ b/tempest/lib/services/compute/interfaces_client.py
@@ -16,15 +16,22 @@
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import interfaces as schema
+from tempest.lib.api_schema.response.compute.v2_70 import interfaces as \
+ schemav270
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class InterfacesClient(base_compute_client.BaseComputeClient):
+ schema_versions_info = [
+ {'min': None, 'max': '2.69', 'schema': schema},
+ {'min': '2.70', 'max': None, 'schema': schemav270}]
+
def list_interfaces(self, server_id):
resp, body = self.get('servers/%s/os-interface' % server_id)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -40,6 +47,7 @@
resp, body = self.post('servers/%s/os-interface' % server_id,
body=post_body)
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_create_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -47,6 +55,7 @@
resp, body = self.get('servers/%s/os-interface/%s' % (server_id,
port_id))
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.get_create_interfaces, resp, body)
return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/identity_providers_client.py b/tempest/lib/services/identity/v3/identity_providers_client.py
new file mode 100644
index 0000000..af6a245
--- /dev/null
+++ b/tempest/lib/services/identity/v3/identity_providers_client.py
@@ -0,0 +1,92 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+
+
+class IdentityProvidersClient(rest_client.RestClient):
+
+ def register_identity_provider(self, identity_provider_id, **kwargs):
+ """Register an identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#register-an-identity-provider
+ """
+ post_body = json.dumps({'identity_provider': kwargs})
+ resp, body = self.put(
+ 'OS-FEDERATION/identity_providers/%s' % identity_provider_id,
+ post_body)
+ self.expected_success(201, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_identity_providers(self, **params):
+ """List identity providers.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-identity-providers
+ """
+ url = 'identity_providers'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def get_identity_provider(self, identity_provider_id):
+ """Get identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-identity-provider
+ """
+ resp, body = self.get(
+ 'OS-FEDERATION/identity_providers/%s' % identity_provider_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_identity_provider(self, identity_provider_id):
+ """Delete identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-identity-provider
+ """
+ resp, body = self.delete(
+ 'OS-FEDERATION/identity_providers/%s' % identity_provider_id)
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_identity_provider(self, identity_provider_id, **kwargs):
+ """Update identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-identity-provider
+ """
+ post_body = json.dumps({'identity_provider': kwargs})
+ resp, body = self.patch(
+ 'OS-FEDERATION/identity_providers/%s' % identity_provider_id,
+ post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/mappings_client.py b/tempest/lib/services/identity/v3/mappings_client.py
new file mode 100644
index 0000000..9ec5384
--- /dev/null
+++ b/tempest/lib/services/identity/v3/mappings_client.py
@@ -0,0 +1,90 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+
+
+class MappingsClient(rest_client.RestClient):
+
+ def create_mapping(self, mapping_id, **kwargs):
+ """Create a mapping.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#create-a-mapping
+ """
+ post_body = json.dumps({'mapping': kwargs})
+ resp, body = self.put(
+ 'OS-FEDERATION/mappings/%s' % mapping_id, post_body)
+ self.expected_success(201, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def get_mapping(self, mapping_id):
+ """Get a mapping.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-a-mapping
+ """
+ resp, body = self.get(
+ 'OS-FEDERATION/mappings/%s' % mapping_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_mapping(self, mapping_id, **kwargs):
+ """Update a mapping.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-a-mapping
+ """
+ post_body = json.dumps({'mapping': kwargs})
+ resp, body = self.patch(
+ 'OS-FEDERATION/mappings/%s' % mapping_id, post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_mappings(self, **kwargs):
+ """List mappings.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-mappings
+ """
+ url = 'OS-FEDERATION/mappings'
+ if kwargs:
+ url += '?%s' % urllib.urlencode(kwargs)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_mapping(self, mapping_id):
+ """Delete a mapping.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-mapping
+ """
+ resp, body = self.delete(
+ 'OS-FEDERATION/mappings/%s' % mapping_id)
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/protocols_client.py b/tempest/lib/services/identity/v3/protocols_client.py
new file mode 100644
index 0000000..2e0221b
--- /dev/null
+++ b/tempest/lib/services/identity/v3/protocols_client.py
@@ -0,0 +1,96 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+
+
+class ProtocolsClient(rest_client.RestClient):
+
+ def add_protocol_to_identity_provider(self, idp_id, protocol_id,
+ **kwargs):
+ """Add protocol to identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#add-protocol-to-identity-provider
+ """
+ post_body = json.dumps({'protocol': kwargs})
+ resp, body = self.put(
+ 'OS-FEDERATION/identity_providers/%s/protocols/%s'
+ % (idp_id, protocol_id), post_body)
+ self.expected_success(201, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_protocols_of_identity_provider(self, idp_id, **kwargs):
+ """List protocols of identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-protocols-of-identity-provider
+ """
+ url = 'OS-FEDERATION/identity_providers/%s/protocols' % idp_id
+ if kwargs:
+ url += '?%s' % urllib.urlencode(kwargs)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def get_protocol_for_identity_provider(self, idp_id, protocol_id):
+ """Get protocol for identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-protocol-for-identity-provider
+ """
+ resp, body = self.get(
+ 'OS-FEDERATION/identity_providers/%s/protocols/%s'
+ % (idp_id, protocol_id))
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_mapping_for_identity_provider(self, idp_id, protocol_id,
+ **kwargs):
+ """Update attribute mapping for identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-attribute-mapping-for-identity-provider
+ """
+ post_body = json.dumps({'protocol': kwargs})
+ resp, body = self.patch(
+ 'OS-FEDERATION/identity_providers/%s/protocols/%s'
+ % (idp_id, protocol_id), post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_protocol_from_identity_provider(self, idp_id, protocol_id):
+ """Delete a protocol from identity provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-a-protocol-from-identity-provider
+ """
+ resp, body = self.delete(
+ 'OS-FEDERATION/identity_providers/%s/protocols/%s'
+ % (idp_id, protocol_id))
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/roles_client.py b/tempest/lib/services/identity/v3/roles_client.py
index 0d7593a..e41dc28 100644
--- a/tempest/lib/services/identity/v3/roles_client.py
+++ b/tempest/lib/services/identity/v3/roles_client.py
@@ -89,6 +89,13 @@
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
+ def create_user_role_on_system(self, user_id, role_id):
+ """Add roles to a user on the system."""
+ resp, body = self.put('system/users/%s/roles/%s' %
+ (user_id, role_id), None)
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def list_user_roles_on_project(self, project_id, user_id):
"""list roles of a user on a project."""
resp, body = self.get('projects/%s/users/%s/roles' %
@@ -105,6 +112,13 @@
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
+ def list_user_roles_on_system(self, user_id):
+ """list roles of a user on the system."""
+ resp, body = self.get('system/users/%s/roles' % user_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
def delete_role_from_user_on_project(self, project_id, user_id, role_id):
"""Delete role of a user on a project."""
resp, body = self.delete('projects/%s/users/%s/roles/%s' %
@@ -119,6 +133,13 @@
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
+ def delete_role_from_user_on_system(self, user_id, role_id):
+ """Delete role of a user on the system."""
+ resp, body = self.delete('system/users/%s/roles/%s' %
+ (user_id, role_id))
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def check_user_role_existence_on_project(self, project_id,
user_id, role_id):
"""Check role of a user on a project."""
@@ -135,6 +156,12 @@
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
+ def check_user_role_existence_on_system(self, user_id, role_id):
+ """Check role of a user on the system."""
+ resp, body = self.head('system/users/%s/roles/%s' % (user_id, role_id))
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp)
+
def create_group_role_on_project(self, project_id, group_id, role_id):
"""Add roles to a group on a project."""
resp, body = self.put('projects/%s/groups/%s/roles/%s' %
@@ -149,6 +176,13 @@
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
+ def create_group_role_on_system(self, group_id, role_id):
+ """Add roles to a group on the system."""
+ resp, body = self.put('system/groups/%s/roles/%s' %
+ (group_id, role_id), None)
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def list_group_roles_on_project(self, project_id, group_id):
"""list roles of a group on a project."""
resp, body = self.get('projects/%s/groups/%s/roles' %
@@ -165,6 +199,13 @@
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
+ def list_group_roles_on_system(self, group_id):
+ """list roles of a group on the system."""
+ resp, body = self.get('system/groups/%s/roles' % group_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
def delete_role_from_group_on_project(self, project_id, group_id, role_id):
"""Delete role of a group on a project."""
resp, body = self.delete('projects/%s/groups/%s/roles/%s' %
@@ -179,6 +220,13 @@
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
+ def delete_role_from_group_on_system(self, group_id, role_id):
+ """Delete role of a group on the system."""
+ resp, body = self.delete('system/groups/%s/roles/%s' %
+ (group_id, role_id))
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def check_role_from_group_on_project_existence(self, project_id,
group_id, role_id):
"""Check role of a group on a project."""
@@ -195,6 +243,13 @@
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
+ def check_role_from_group_on_system_existence(self, group_id, role_id):
+ """Check role of a group on the system."""
+ resp, body = self.head('system/groups/%s/roles/%s' %
+ (group_id, role_id))
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp)
+
def create_role_inference_rule(self, prior_role, implies_role):
"""Create a role inference rule."""
resp, body = self.put('roles/%s/implies/%s' %
diff --git a/tempest/lib/services/identity/v3/service_providers_client.py b/tempest/lib/services/identity/v3/service_providers_client.py
new file mode 100644
index 0000000..b84cf43
--- /dev/null
+++ b/tempest/lib/services/identity/v3/service_providers_client.py
@@ -0,0 +1,92 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+
+
+class ServiceProvidersClient(rest_client.RestClient):
+
+ def register_service_provider(self, service_provider_id, **kwargs):
+ """Register a service provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#register-a-service-provider
+ """
+ post_body = json.dumps({'service_provider': kwargs})
+ resp, body = self.put(
+ 'OS-FEDERATION/service_providers/%s' % service_provider_id,
+ post_body)
+ self.expected_success(201, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_service_providers(self, **kwargs):
+ """List service providers.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#list-service-providers
+ """
+ url = 'OS-FEDERATION/service_providers'
+ if kwargs:
+ url += '?%s' % urllib.urlencode(kwargs)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def get_service_provider(self, service_provider_id):
+ """Get a service provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#get-service-provider
+ """
+ resp, body = self.get(
+ 'OS-FEDERATION/service_providers/%s' % service_provider_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_service_provider(self, service_provider_id):
+ """Delete a service provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#delete-service-provider
+ """
+ resp, body = self.delete(
+ 'OS-FEDERATION/service_providers/%s' % service_provider_id)
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def update_service_provider(self, service_provider_id, **kwargs):
+ """Update a service provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3-ext/index.html#update-service-provider
+ """
+ post_body = json.dumps({'service_provider': kwargs})
+ resp, body = self.patch(
+ 'OS-FEDERATION/service_providers/%s' % service_provider_id,
+ post_body)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/identity/v3/token_client.py b/tempest/lib/services/identity/v3/token_client.py
index 6956297..08a8f46 100644
--- a/tempest/lib/services/identity/v3/token_client.py
+++ b/tempest/lib/services/identity/v3/token_client.py
@@ -51,7 +51,7 @@
def auth(self, user_id=None, username=None, password=None, project_id=None,
project_name=None, user_domain_id=None, user_domain_name=None,
project_domain_id=None, project_domain_name=None, domain_id=None,
- domain_name=None, token=None, app_cred_id=None,
+ domain_name=None, system=None, token=None, app_cred_id=None,
app_cred_secret=None):
"""Obtains a token from the authentication service
@@ -65,6 +65,7 @@
:param domain_name: a domain name to scope to
:param project_id: a project id to scope to
:param project_name: a project name to scope to
+ :param system: whether the token should be scoped to the system
:param token: a token to re-scope.
Accepts different combinations of credentials.
@@ -74,6 +75,7 @@
- user_id, password
- username, password, user_domain_id
- username, password, project_name, user_domain_id, project_domain_id
+ - username, password, user_domain_id, system
Validation is left to the server side.
"""
creds = {
@@ -135,6 +137,8 @@
creds['auth']['scope'] = dict(domain={'id': domain_id})
elif domain_name:
creds['auth']['scope'] = dict(domain={'name': domain_name})
+ elif system:
+ creds['auth']['scope'] = dict(system={system: True})
body = json.dumps(creds, sort_keys=True)
resp, body = self.post(self.auth_url, body=body)
diff --git a/tempest/lib/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
index 383aff6..1d38153 100644
--- a/tempest/lib/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -12,6 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import ssl
from six.moves import http_client as httplib
from six.moves.urllib import parse as urlparse
@@ -118,7 +119,7 @@
path = str(parsed.path) + "/"
path += "%s/%s" % (str(container), str(object_name))
- conn = _create_connection(parsed)
+ conn = self._create_connection(parsed)
# Send the PUT request and the headers including the "Expect" header
conn.putrequest('PUT', path)
@@ -151,15 +152,20 @@
return resp.status, resp.reason
+ def _create_connection(self, parsed_url):
+ """Helper function to create connection with httplib
-def _create_connection(parsed_url):
- """Helper function to create connection with httplib
+ :param parsed_url: parsed url of the remote location
+ """
+ context = None
+ # If CONF.identity.disable_ssl_certificate_validation is true,
+ # do not check ssl certification.
+ if self.dscv:
+ context = ssl._create_unverified_context()
+ if parsed_url.scheme == 'https':
+ conn = httplib.HTTPSConnection(parsed_url.netloc,
+ context=context)
+ else:
+ conn = httplib.HTTPConnection(parsed_url.netloc)
- :param parsed_url: parsed url of the remote location
- """
- if parsed_url.scheme == 'https':
- conn = httplib.HTTPSConnection(parsed_url.netloc)
- else:
- conn = httplib.HTTPConnection(parsed_url.netloc)
-
- return conn
+ return conn
diff --git a/tempest/manager.py b/tempest/manager.py
deleted file mode 100644
index b485ef2..0000000
--- a/tempest/manager.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-
-from tempest import clients as tempest_clients
-from tempest import config
-from tempest.lib.services import clients
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-class Manager(clients.ServiceClients):
- """Service client manager class for backward compatibility
-
- The former manager.Manager is not a stable interface in Tempest,
- nonetheless it is consumed by a number of plugins already. This class
- exists to provide some grace time for the move to tempest.lib.
- """
-
- def __init__(self, credentials, scope='project'):
- msg = ("tempest.manager.Manager is not a stable interface and as such "
- "it should not be imported directly. It will be removed as "
- "soon as the client manager becomes available in tempest.lib.")
- LOG.warning(msg)
- dscv = CONF.identity.disable_ssl_certificate_validation
- _, uri = tempest_clients.get_auth_provider_class(credentials)
- super(Manager, self).__init__(
- credentials=credentials, scope=scope,
- identity_uri=uri,
- disable_ssl_certificate_validation=dscv,
- ca_certs=CONF.identity.ca_certificates_file,
- trace_requests=CONF.debug.trace_requests)
-
-
-def get_auth_provider(credentials, pre_auth=False, scope='project'):
- """Shim to get_auth_provider in clients.py
-
- get_auth_provider used to be hosted in this module, but it has been
- moved to clients.py now as a more permanent location.
- This module will be removed eventually, and this shim is only
- maintained for the benefit of plugins already consuming this interface.
- """
- msg = ("tempest.manager.get_auth_provider is not a stable interface and "
- "as such it should not imported directly. It will be removed as "
- "the client manager becomes available in tempest.lib.")
- LOG.warning(msg)
- return tempest_clients.get_auth_provider(credentials=credentials,
- pre_auth=pre_auth, scope=scope)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index f01c056..4652af4 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -143,10 +143,20 @@
# resp part which is not used in scenario tests
def create_port(self, network_id, client=None, **kwargs):
- """Creates port"""
+ """Creates port for the respective network_id
+
+ :param network_id: the id of the network
+ :param client: the client to use, defaults to self.ports_client
+ :param kwargs: additional arguments such as:
+ - namestart - a string to generate a name for the port from
+ - default is self.__class__.__name__
+ - 'binding:vnic_type' - defaults to CONF.network.port_vnic_type
+ - 'binding:profile' - defaults to CONF.network.port_profile
+ """
if not client:
client = self.ports_client
- name = data_utils.rand_name(self.__class__.__name__)
+ name = data_utils.rand_name(
+ kwargs.pop('namestart', self.__class__.__name__))
if CONF.network.port_vnic_type and 'binding:vnic_type' not in kwargs:
kwargs['binding:vnic_type'] = CONF.network.port_vnic_type
if CONF.network.port_profile and 'binding:profile' not in kwargs:
@@ -155,12 +165,13 @@
name=name,
network_id=network_id,
**kwargs)
+ self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
return port
- def create_keypair(self, client=None):
+ def create_keypair(self, client=None, **kwargs):
"""Creates keypair
Keypair is a public key of OpenSSH key pair used for accessing
@@ -170,10 +181,11 @@
"""
if not client:
client = self.keypairs_client
- name = data_utils.rand_name(self.__class__.__name__)
+ if not kwargs.get('name'):
+ kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
- body = client.create_keypair(name=name)
- self.addCleanup(client.delete_keypair, name)
+ body = client.create_keypair(**kwargs)
+ self.addCleanup(client.delete_keypair, kwargs['name'])
return body['keypair']
def create_server(self, name=None, image_id=None, flavor=None,
@@ -199,6 +211,14 @@
direct: an SR-IOV port that is directly attached to a VM
macvtap: an SR-IOV port that is attached to a VM via a macvtap
device.
+ direct-physical: an SR-IOV port that is directly attached to a
+ VM using physical instead of virtual
+ functions.
+ baremetal: a baremetal port directly attached to a baremetal
+ node.
+ virtio-forwarder: an SR-IOV port that is indirectly attached
+ to a VM using a low-latency vhost-user
+ forwarding process.
Defaults to ``CONF.network.port_vnic_type``.
* *port_profile* (``dict``) --
This attribute is a dictionary that can be used (with admin
@@ -206,6 +226,9 @@
the port.
example: port_profile = "capabilities:[switchdev]"
Defaults to ``CONF.network.port_profile``.
+ * *create_port_body* (``dict``) --
+ This attribute is a dictionary of additional arguments to be
+ passed to create_port method.
"""
# NOTE(jlanoux): As a first step, ssh checks in the scenario
@@ -231,7 +254,7 @@
# every network
if vnic_type or profile:
ports = []
- create_port_body = {}
+ create_port_body = kwargs.pop('create_port_body', {})
if vnic_type:
create_port_body['binding:vnic_type'] = vnic_type
@@ -352,21 +375,35 @@
def create_backup(self, volume_id, name=None, description=None,
force=False, snapshot_id=None, incremental=False,
- container=None):
- """Creates backup
+ container=None, **kwargs):
+ """Creates a backup of the given volume_id or snapshot_id
- This wrapper utility creates backup and waits for backup to be
- in 'available' state.
+ This wrapper utility creates a backup and waits until it is in
+ 'available' state.
+
+ :param volume_id: UUID of the volume to back up
+ :param name: backup name, '$classname-backup' by default
+ :param description: Description of the backup, None by default
+        :param force: boolean whether to back up even if the volume is
+                      attached, False by default
+ :param snapshot_id: UUID of the source snapshot to back up
+ None by default
+ :param incremental: boolean, False by default
+ :param container: a container name, None by default
+ :param **kwargs: additional parameters per the documentation:
+ https://docs.openstack.org/api-ref/block-storage/v3/
+ #create-a-backup
"""
name = name or data_utils.rand_name(
self.__class__.__name__ + "-backup")
- kwargs = {'name': name,
- 'description': description,
- 'force': force,
- 'snapshot_id': snapshot_id,
- 'incremental': incremental,
- 'container': container}
+ args = {'name': name,
+ 'description': description,
+ 'force': force,
+ 'snapshot_id': snapshot_id,
+ 'incremental': incremental,
+ 'container': container}
+ args.update(kwargs)
backup = self.backups_client.create_backup(volume_id=volume_id,
                                                   **args)['backup']
self.addCleanup(self.backups_client.delete_backup, backup['id'])
@@ -374,14 +411,20 @@
backup['id'], 'available')
return backup
- def restore_backup(self, backup_id):
- """Restore backup
+ def restore_backup(self, backup_id, **kwargs):
+ """Restores a backup given by the backup_id
- This wrapper utility restores backup and waits for backup to be
- in 'available' state.
+ This wrapper utility restores a backup and waits until it is in
+ 'available' state.
+
+ :param backup_id: UUID of a backup to restore
+ :param **kwargs: additional parameters per the documentation:
+ https://docs.openstack.org/api-ref/block-storage/v3/
+ #restore-a-backup
"""
- restore = self.backups_client.restore_backup(backup_id)['restore']
+ body = self.backups_client.restore_backup(backup_id, **kwargs)
+ restore = body['restore']
self.addCleanup(self.volumes_client.delete_volume,
restore['volume_id'])
waiters.wait_for_volume_resource_status(self.backups_client,
@@ -392,19 +435,17 @@
self.assertEqual(backup_id, restore['backup_id'])
return restore
- def rebuild_server(self, server_id, image=None,
- preserve_ephemeral=False, wait=True,
- rebuild_kwargs=None):
+ def rebuild_server(self, server_id, image=None, preserve_ephemeral=False,
+ wait=True, **kwargs):
if image is None:
image = CONF.compute.image_ref
- rebuild_kwargs = rebuild_kwargs or {}
LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
server_id, image, preserve_ephemeral)
self.servers_client.rebuild_server(
server_id=server_id,
image_ref=image,
preserve_ephemeral=preserve_ephemeral,
- **rebuild_kwargs)
+ **kwargs)
if wait:
waiters.wait_for_server_status(self.servers_client,
server_id, 'ACTIVE')
@@ -464,7 +505,8 @@
admin_volumes_client.wait_for_resource_deletion(volume['id'])
admin_volume_type_client.delete_volume_type(volume_type['id'])
- def create_volume_type(self, client=None, name=None, backend_name=None):
+ def create_volume_type(self, client=None, name=None, backend_name=None,
+ **kwargs):
"""Creates volume type
In a multiple-storage back-end configuration,
@@ -491,17 +533,26 @@
LOG.debug("Creating a volume type: %s on backend %s",
randomized_name, backend_name)
- extra_specs = {}
+ extra_specs = kwargs.pop("extra_specs", {})
if backend_name:
- extra_specs = {"volume_backend_name": backend_name}
+ extra_specs.update({"volume_backend_name": backend_name})
- volume_type = client.create_volume_type(
- name=randomized_name, extra_specs=extra_specs)['volume_type']
+ volume_type_resp = client.create_volume_type(
+ name=randomized_name, extra_specs=extra_specs, **kwargs)
+ volume_type = volume_type_resp['volume_type']
+
self.assertIn('id', volume_type)
self.addCleanup(self._cleanup_volume_type, volume_type)
return volume_type
- def _create_loginable_secgroup_rule(self, secgroup_id=None):
+ def create_loginable_secgroup_rule(self, secgroup_id=None, rulesets=None):
+ """Create loginable security group rule by compute clients.
+
+ This function will create by default the following rules:
+ 1. tcp port 22 allow rule in order to allow ssh access for ipv4
+ 2. ipv4 icmp allow rule in order to allow icmpv4
+ """
+
_client = self.compute_security_groups_client
_client_rules = self.compute_security_group_rules_client
if secgroup_id is None:
@@ -514,22 +565,23 @@
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
- rulesets = [
- {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- },
- {
- # ping
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- }
- ]
+ if not rulesets:
+ rulesets = [
+ {
+ # ssh
+ 'ip_protocol': 'tcp',
+ 'from_port': 22,
+ 'to_port': 22,
+ 'cidr': '0.0.0.0/0',
+ },
+ {
+ # ping
+ 'ip_protocol': 'icmp',
+ 'from_port': -1,
+ 'to_port': -1,
+ 'cidr': '0.0.0.0/0',
+ }
+ ]
rules = list()
for ruleset in rulesets:
sg_rule = _client_rules.create_security_group_rule(
@@ -537,21 +589,23 @@
rules.append(sg_rule)
return rules
- def _create_security_group(self):
+ def _create_security_group(self, **kwargs):
"""Create security group and add rules to security group"""
- sg_name = data_utils.rand_name(self.__class__.__name__)
- sg_desc = sg_name + " description"
+ if not kwargs.get('name'):
+ kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
+ if not kwargs.get('description'):
+ kwargs['description'] = kwargs['name'] + " description"
secgroup = self.compute_security_groups_client.create_security_group(
- name=sg_name, description=sg_desc)['security_group']
- self.assertEqual(secgroup['name'], sg_name)
- self.assertEqual(secgroup['description'], sg_desc)
+ **kwargs)['security_group']
+ self.assertEqual(secgroup['name'], kwargs['name'])
+ self.assertEqual(secgroup['description'], kwargs['description'])
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.compute_security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
- self._create_loginable_secgroup_rule(secgroup['id'])
+ self.create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def get_remote_client(self, ip_address, username=None, private_key=None,
@@ -583,7 +637,7 @@
linux_client.validate_authentication()
return linux_client
- def image_create(self, name='scenario-img'):
+ def image_create(self, name='scenario-img', **kwargs):
img_path = CONF.scenario.img_file
if not os.path.exists(img_path):
# TODO(kopecmartin): replace LOG.warning for rasing
@@ -623,6 +677,7 @@
# Additional properties are flattened out in the v2 API.
if img_properties:
params.update(img_properties)
+ params.update(kwargs)
body = self.image_client.create_image(**params)
image = body['image'] if 'image' in body else body
self.addCleanup(self.image_client.delete_image, image['id'])
@@ -635,7 +690,7 @@
LOG.debug("image:%s", image['id'])
return image['id']
- def _log_console_output(self, servers=None, client=None, **kwargs):
+ def log_console_output(self, servers=None, client=None, **kwargs):
"""Console log output"""
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
@@ -659,7 +714,7 @@
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
- def create_server_snapshot(self, server, name=None):
+ def create_server_snapshot(self, server, name=None, **kwargs):
"""Creates server snapshot"""
# Glance client
_image_client = self.image_client
@@ -668,7 +723,7 @@
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
LOG.debug("Creating a snapshot image for server: %s", server['name'])
- image = _images_client.create_image(server['id'], name=name)
+ image = _images_client.create_image(server['id'], name=name, **kwargs)
image_id = image.response['location'].split('images/')[1]
waiters.wait_for_image_status(_image_client, image_id, 'active')
@@ -727,14 +782,12 @@
def nova_volume_detach(self, server, volume):
"""Compute volume detach
- This utility detaches volume from compute and check whether the
- volume status is 'available' state, and if not, an exception
- will be thrown.
+ This utility detaches the volume from the server and checks whether the
+ volume attachment has been removed from Nova.
"""
self.servers_client.detach_volume(server['id'], volume['id'])
- waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'available')
- volume = self.volumes_client.show_volume(volume['id'])['volume']
+ waiters.wait_for_volume_attachment_remove_from_server(
+ self.servers_client, server['id'], volume['id'])
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None, mtu=None, server=None):
@@ -773,7 +826,7 @@
'result': 'expected' if result else 'unexpected'
})
if server:
- self._log_console_output([server])
+ self.log_console_output([server])
return result
def check_vm_connectivity(self, ip_address,
@@ -840,18 +893,20 @@
return floating_ip
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
- private_key=None, server=None):
+ private_key=None, server=None, username=None,
+ fs='ext4'):
"""Creates timestamp
This wrapper utility does ssh, creates timestamp and returns the
created timestamp.
"""
-
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
- server=server)
+ server=server,
+ username=username)
+
if dev_name is not None:
- ssh_client.make_fs(dev_name)
+ ssh_client.make_fs(dev_name, fs=fs)
ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
mount_path))
cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
@@ -863,15 +918,25 @@
return timestamp
def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
- private_key=None, server=None):
+ private_key=None, server=None, username=None):
"""Returns timestamp
This wrapper utility does ssh and returns the timestamp.
+
+ :param ip_address: The floating IP or fixed IP of the remote server
+ :param dev_name: Name of the device that stores the timestamp
+ :param mount_path: Path which should be used as mount point for
+ dev_name
+ :param private_key: The SSH private key to use for authentication
+ :param server: Server dict, used for debugging purposes
+ :param username: Name of the Linux account on the remote server
"""
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
- server=server)
+ server=server,
+ username=username)
+
if dev_name is not None:
ssh_client.mount(dev_name, mount_path)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
@@ -958,12 +1023,21 @@
return self.create_server(**create_kwargs)
- def create_volume_from_image(self):
- """Create volume from image"""
- img_uuid = CONF.compute.image_ref
- vol_name = data_utils.rand_name(
- self.__class__.__name__ + '-volume-origin')
- return self.create_volume(name=vol_name, imageRef=img_uuid)
+ def create_volume_from_image(self, **kwargs):
+ """Create volume from image.
+
+ :param image_id: ID of the image to create volume from,
+ CONF.compute.image_ref by default
+ :param name: name of the volume,
+ '$classname-volume-origin' by default
+ :param **kwargs: additional parameters
+ """
+ image_id = kwargs.pop('image_id', CONF.compute.image_ref)
+ name = kwargs.pop('name', None)
+ if not name:
+ namestart = self.__class__.__name__ + '-volume-origin'
+ name = data_utils.rand_name(namestart)
+ return self.create_volume(name=name, imageRef=image_id, **kwargs)
class NetworkScenarioTest(ScenarioTest):
@@ -1032,12 +1106,19 @@
def cidr_in_use(cidr, project_id):
"""Check cidr existence
- :returns: True if subnet with cidr already exist in tenant
- False else
+            :returns: True if a subnet with the given cidr already exists
+                      in the tenant or in an external network, False
+                      otherwise
"""
- cidr_in_use = self.os_admin.subnets_client.list_subnets(
+ tenant_subnets = self.os_admin.subnets_client.list_subnets(
project_id=project_id, cidr=cidr)['subnets']
- return len(cidr_in_use) != 0
+ external_nets = self.os_admin.networks_client.list_networks(
+ **{"router:external": True})['networks']
+ external_subnets = []
+ for ext_net in external_nets:
+ external_subnets.extend(
+ self.os_admin.subnets_client.list_subnets(
+ network_id=ext_net['id'], cidr=cidr)['subnets'])
+ return len(tenant_subnets + external_subnets) != 0
ip_version = kwargs.pop('ip_version', 4)
@@ -1083,14 +1164,13 @@
return subnet
- def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- if ip_addr:
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'],
- fixed_ips='ip_address=%s' % ip_addr)['ports']
- else:
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'])['ports']
+ def _get_server_port_id_and_ip4(self, server, ip_addr=None, **kwargs):
+
+ if ip_addr and not kwargs.get('fixed_ips'):
+ kwargs['fixed_ips'] = 'ip_address=%s' % ip_addr
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'], **kwargs)['ports']
+
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with 2 subnets
@@ -1129,7 +1209,7 @@
return net[0]
def create_floating_ip(self, server, external_network_id=None,
- port_id=None, client=None):
+ port_id=None, client=None, **kwargs):
"""Create a floating IP and associates to a resource/port on Neutron"""
if not external_network_id:
@@ -1141,15 +1221,17 @@
else:
ip4 = None
- kwargs = {
+ floatingip_kwargs = {
'floating_network_id': external_network_id,
'port_id': port_id,
'tenant_id': server.get('project_id') or server['tenant_id'],
'fixed_ip_address': ip4,
}
if CONF.network.subnet_id:
- kwargs['subnet_id'] = CONF.network.subnet_id
- result = client.create_floatingip(**kwargs)
+ floatingip_kwargs['subnet_id'] = CONF.network.subnet_id
+
+ floatingip_kwargs.update(kwargs)
+ result = client.create_floatingip(**floatingip_kwargs)
floating_ip = result['floatingip']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
@@ -1157,6 +1239,32 @@
floating_ip['id'])
return floating_ip
+ def associate_floating_ip(self, floating_ip, server):
+ """Associate floating ip
+
+ This wrapper utility attaches the floating_ip for
+ the respective port_id of server
+ """
+ port_id, _ = self._get_server_port_id_and_ip4(server)
+ kwargs = dict(port_id=port_id)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertEqual(port_id, floating_ip['port_id'])
+ return floating_ip
+
+ def disassociate_floating_ip(self, floating_ip):
+ """Disassociates floating ip
+
+ This wrapper utility disassociates given floating ip.
+ :param floating_ip: a dict which is a return value of
+ floating_ips_client.create_floatingip method
+ """
+ kwargs = dict(port_id=None)
+ floating_ip = self.floating_ips_client.update_floatingip(
+ floating_ip['id'], **kwargs)['floatingip']
+ self.assertIsNone(floating_ip['port_id'])
+ return floating_ip
+
def check_floating_ip_status(self, floating_ip, status):
"""Verifies floatingip reaches the given status
@@ -1207,7 +1315,7 @@
should_connect=should_connect)
except Exception as e:
LOG.exception('Tenant network connectivity check failed')
- self._log_console_output(servers_for_debug)
+ self.log_console_output(servers_for_debug)
self._log_net_info(e)
raise
@@ -1250,7 +1358,7 @@
% (dest, source_host)
else:
msg = "%s is reachable from %s" % (dest, source_host)
- self._log_console_output()
+ self.log_console_output()
self.fail(msg)
def _create_security_group(self, security_group_rules_client=None,
@@ -1268,7 +1376,7 @@
project_id=project_id)
# Add rules to the security group
- rules = self._create_loginable_secgroup_rule(
+ rules = self.create_loginable_secgroup_rule(
security_group_rules_client=security_group_rules_client,
secgroup=secgroup,
security_groups_client=security_groups_client)
@@ -1309,10 +1417,10 @@
client.delete_security_group, secgroup['id'])
return secgroup
- def _create_security_group_rule(self, secgroup=None,
- sec_group_rules_client=None,
- project_id=None,
- security_groups_client=None, **kwargs):
+ def create_security_group_rule(self, secgroup=None,
+ sec_group_rules_client=None,
+ project_id=None,
+ security_groups_client=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
Create a rule in a secgroup. if secgroup not defined will search for
@@ -1357,10 +1465,10 @@
return sg_rule
- def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
- secgroup=None,
- security_groups_client=None):
- """Create loginable security group rule
+ def create_loginable_secgroup_rule(self, security_group_rules_client=None,
+ secgroup=None,
+ security_groups_client=None):
+ """Create loginable security group rule by neutron clients by default.
This function will create:
1. egress and ingress tcp port 22 allow rule in order to allow ssh
@@ -1396,7 +1504,7 @@
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
- sg_rule = self._create_security_group_rule(
+ sg_rule = self.create_security_group_rule(
sec_group_rules_client=sec_group_rules_client,
secgroup=secgroup,
security_groups_client=security_groups_client,
@@ -1412,7 +1520,7 @@
return rules
- def _get_router(self, client=None, project_id=None):
+ def _get_router(self, client=None, project_id=None, **kwargs):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
@@ -1432,11 +1540,20 @@
body = client.show_router(router_id)
return body['router']
elif network_id:
+ name = kwargs.pop('name', None)
+ if not name:
+ namestart = self.__class__.__name__ + '-router'
+ name = data_utils.rand_name(namestart)
+
+ ext_gw_info = kwargs.pop('external_gateway_info', None)
+ if not ext_gw_info:
+ ext_gw_info = dict(network_id=network_id)
router = client.create_router(
- name=data_utils.rand_name(self.__class__.__name__ + '-router'),
- admin_state_up=True,
+ name=name,
+ admin_state_up=kwargs.get('admin_state_up', True),
project_id=project_id,
- external_gateway_info=dict(network_id=network_id))['router']
+ external_gateway_info=ext_gw_info,
+ **kwargs)['router']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_router, router['id'])
return router
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
new file mode 100644
index 0000000..b1098fa
--- /dev/null
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -0,0 +1,141 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import html.parser
+import ssl
+from urllib import parse
+from urllib import request
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib import decorators
+from tempest import test
+
+CONF = config.CONF
+
+
+class HorizonHTMLParser(html.parser.HTMLParser):
+ csrf_token = None
+ region = None
+ login = None
+
+ def _find_name(self, attrs, name):
+ for attrpair in attrs:
+ if attrpair[0] == 'name' and attrpair[1] == name:
+ return True
+ return False
+
+ def _find_value(self, attrs):
+ for attrpair in attrs:
+ if attrpair[0] == 'value':
+ return attrpair[1]
+ return None
+
+ def _find_attr_value(self, attrs, attr_name):
+ for attrpair in attrs:
+ if attrpair[0] == attr_name:
+ return attrpair[1]
+ return None
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'input':
+ if self._find_name(attrs, 'csrfmiddlewaretoken'):
+ self.csrf_token = self._find_value(attrs)
+ if self._find_name(attrs, 'region'):
+ self.region = self._find_value(attrs)
+ if tag == 'form':
+ self.login = self._find_attr_value(attrs, 'action')
+
+
+class TestDashboardBasicOps(test.BaseTestCase):
+
+ """The test suite for dashboard basic operations
+
+ This is a basic scenario test:
+ * checks that the login page is available
+ * logs in as a regular user
+ * checks that the user home page loads without error
+ """
+ opener = None
+
+ credentials = ['primary']
+
+ @classmethod
+ def skip_checks(cls):
+ super(TestDashboardBasicOps, cls).skip_checks()
+ if not CONF.service_available.horizon:
+ raise cls.skipException("Horizon support is required")
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources()
+ super(TestDashboardBasicOps, cls).setup_credentials()
+
+ def check_login_page(self):
+ response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+ self.assertIn("id_username", response.decode("utf-8"))
+
+ def user_login(self, username, password):
+ response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+
+ # Grab the CSRF token and default region
+ parser = HorizonHTMLParser()
+ parser.feed(response.decode("utf-8"))
+
+ # construct login url for dashboard, discovery accommodates non-/ web
+ # root for dashboard
+ login_url = parse.urljoin(CONF.dashboard.dashboard_url, parser.login)
+
+ # Prepare login form request
+ req = request.Request(login_url)
+ req.add_header('Content-type', 'application/x-www-form-urlencoded')
+ req.add_header('Referer', CONF.dashboard.dashboard_url)
+
+ # Pass the default domain name regardless of the auth version in order
+ # to test the scenario of when horizon is running with keystone v3
+ params = {'username': username,
+ 'password': password,
+ 'region': parser.region,
+ 'domain': CONF.auth.default_credentials_domain_name,
+ 'csrfmiddlewaretoken': parser.csrf_token}
+ self._get_opener().open(req, parse.urlencode(params).encode())
+
+ def check_home_page(self):
+ response = self._get_opener().open(CONF.dashboard.dashboard_url).read()
+ self.assertIn('Overview', response.decode("utf-8"))
+
+ def _get_opener(self):
+ if not self.opener:
+ if (CONF.dashboard.disable_ssl_certificate_validation and
+ self._ssl_default_context_supported()):
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+ self.opener = request.build_opener(
+ request.HTTPSHandler(context=ctx),
+ request.HTTPCookieProcessor())
+ else:
+ self.opener = request.build_opener(
+ request.HTTPCookieProcessor())
+ return self.opener
+
+ def _ssl_default_context_supported(self):
+ return (hasattr(ssl, 'create_default_context'))
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('4f8851b1-0e69-482b-b63b-84c6e76f6c80')
+ @utils.services('dashboard')
+ def test_basic_scenario(self):
+ creds = self.os_primary.credentials
+ self.check_login_page()
+ self.user_login(creds.username, creds.password)
+ self.check_home_page()
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index fc93a5e..6ee9f28 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -30,8 +30,7 @@
For both LUKS and cryptsetup encryption types, this test performs
the following:
- * Creates an image in Glance
- * Boots an instance from the image
+ * Boots an instance from an image (CONF.compute.image_ref)
* Creates an encryption type (as admin)
* Creates a volume of that encryption type (as a regular user)
* Attaches and detaches the encrypted volume to the instance
@@ -44,10 +43,9 @@
raise cls.skipException('Encrypted volume attach is not supported')
def launch_instance(self):
- image = self.image_create()
keypair = self.create_keypair()
- return self.create_server(image_id=image, key_name=keypair['name'])
+ return self.create_server(key_name=keypair['name'])
def attach_detach_volume(self, server, volume):
attached_volume = self.nova_volume_attach(server, volume)
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
index 74d4ed9..8c2752d 100644
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
+import testtools
from tempest.common import utils
from tempest.common import waiters
@@ -20,10 +20,10 @@
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
from tempest.scenario import manager
-LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -55,6 +55,8 @@
# https://github.com/openstack/placement/blob/master/placement/
# db/constants.py#L16
PLACEMENT_MAX_INT = 0x7FFFFFFF
+ BANDWIDTH_1 = 1000
+ BANDWIDTH_2 = 2000
@classmethod
def setup_clients(cls):
@@ -62,9 +64,12 @@
cls.placement_client = cls.os_admin.placement_client
cls.networks_client = cls.os_admin.networks_client
cls.subnets_client = cls.os_admin.subnets_client
+ cls.ports_client = cls.os_primary.ports_client
cls.routers_client = cls.os_adm.routers_client
cls.qos_client = cls.os_admin.qos_client
cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+ cls.flavors_client = cls.os_adm.flavors_client
+ cls.servers_client = cls.os_adm.servers_client
@classmethod
def skip_checks(cls):
@@ -74,6 +79,10 @@
"placement based QoS allocation."
raise cls.skipException(msg)
+ def setUp(self):
+ super(MinBwAllocationPlacementTest, self).setUp()
+ self._check_if_allocation_is_possible()
+
def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
policy = self.qos_client.create_qos_policy(
name=data_utils.rand_name(name_prefix),
@@ -93,7 +102,7 @@
return policy
- def _create_qos_policies(self):
+ def _create_qos_basic_policies(self):
self.qos_policy_valid = self._create_policy_and_min_bw_rule(
name_prefix='test_policy_valid',
min_kbps=self.SMALLEST_POSSIBLE_BW)
@@ -101,7 +110,20 @@
name_prefix='test_policy_not_valid',
min_kbps=self.PLACEMENT_MAX_INT)
- def _create_network_and_qos_policies(self):
+ def _create_qos_policies_from_life(self):
+ # For tempest-slow the max bandwidth configured is 1000000,
+ # https://opendev.org/openstack/tempest/src/branch/master/
+ # .zuul.yaml#L416-L420
+ self.qos_policy_1 = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_1',
+ min_kbps=self.BANDWIDTH_1
+ )
+ self.qos_policy_2 = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_2',
+ min_kbps=self.BANDWIDTH_2
+ )
+
+ def _create_network_and_qos_policies(self, policy_method):
physnet_name = CONF.network_feature_enabled.qos_placement_physnet
base_segm = \
CONF.network_feature_enabled.provider_net_base_segmentation_id
@@ -117,7 +139,7 @@
'provider:segmentation_id': base_segm
})
- self._create_qos_policies()
+ policy_method()
def _check_if_allocation_is_possible(self):
alloc_candidates = self.placement_client.list_allocation_candidates(
@@ -139,8 +161,43 @@
self.fail('For %s:%s there should be no available candidate!' %
(self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+ def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
+ wait_until = (None if status == 'ERROR' else status)
+ port = self.create_port(
+ self.prov_network['id'], qos_policy_id=qos_policy_id)
+
+ server = self.create_server(networks=[{'port': port['id']}],
+ wait_until=wait_until)
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status=status, ready_wait=False, raise_on_error=False)
+ return server, port
+
+ def _assert_allocation_is_as_expected(self, consumer, port_ids,
+ min_kbps=SMALLEST_POSSIBLE_BW):
+ allocations = self.placement_client.list_allocations(
+ consumer)['allocations']
+ self.assertGreater(len(allocations), 0)
+ bw_resource_in_alloc = False
+ for rp, resources in allocations.items():
+ if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+ self.assertEqual(
+ min_kbps,
+ resources['resources'][self.INGRESS_RESOURCE_CLASS])
+ bw_resource_in_alloc = True
+ allocation_rp = rp
+ if min_kbps:
+ self.assertTrue(bw_resource_in_alloc)
+
+ # Check that the binding_profile of the port is not empty and matches
+ # the resource provider uuid
+ for port_id in port_ids:
+ port = self.os_admin.ports_client.show_port(port_id)
+ self.assertEqual(
+ allocation_rp,
+ port['port']['binding:profile']['allocation'])
+
@decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
- @decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_qos_min_bw_allocation_basic(self):
""""Basic scenario with QoS min bw allocation in placement.
@@ -161,41 +218,14 @@
* Create port with invalid QoS policy, and try to boot VM with that,
it should fail.
"""
+ self._create_network_and_qos_policies(self._create_qos_basic_policies)
+ server1, valid_port = self._boot_vm_with_min_bw(
+ qos_policy_id=self.qos_policy_valid['id'])
+ self._assert_allocation_is_as_expected(server1['id'],
+ [valid_port['id']])
- self._check_if_allocation_is_possible()
-
- self._create_network_and_qos_policies()
-
- valid_port = self.create_port(
- self.prov_network['id'], qos_policy_id=self.qos_policy_valid['id'])
-
- server1 = self.create_server(
- networks=[{'port': valid_port['id']}])
- allocations = self.placement_client.list_allocations(server1['id'])
-
- self.assertGreater(len(allocations['allocations']), 0)
- bw_resource_in_alloc = False
- for rp, resources in allocations['allocations'].items():
- if self.INGRESS_RESOURCE_CLASS in resources['resources']:
- bw_resource_in_alloc = True
- allocation_rp = rp
- self.assertTrue(bw_resource_in_alloc)
- # Check that binding_profile of the port is not empty and equals with
- # the rp uuid
- port = self.os_admin.ports_client.show_port(valid_port['id'])
- self.assertEqual(allocation_rp,
- port['port']['binding:profile']['allocation'])
-
- # boot another vm with max int bandwidth
- not_valid_port = self.create_port(
- self.prov_network['id'],
- qos_policy_id=self.qos_policy_not_valid['id'])
- server2 = self.create_server(
- wait_until=None,
- networks=[{'port': not_valid_port['id']}])
- waiters.wait_for_server_status(
- client=self.os_primary.servers_client, server_id=server2['id'],
- status='ERROR', ready_wait=False, raise_on_error=False)
+ server2, not_valid_port = self._boot_vm_with_min_bw(
+ self.qos_policy_not_valid['id'], status='ERROR')
allocations = self.placement_client.list_allocations(server2['id'])
self.assertEqual(0, len(allocations['allocations']))
@@ -205,3 +235,253 @@
# Check that binding_profile of the port is empty
port = self.os_admin.ports_client.show_port(not_valid_port['id'])
self.assertEqual(0, len(port['port']['binding:profile']))
+
+ @decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @utils.services('compute', 'network')
+ def test_migrate_with_qos_min_bw_allocation(self):
+ """Scenario to migrate VM with QoS min bw allocation in placement
+
+ Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+ checks, and
+ * migrate the server
+ * confirm the resize, if the VM state is VERIFY_RESIZE
+ * If the VM goes to ACTIVE state check that allocations are as
+ expected.
+ """
+ self._create_network_and_qos_policies(self._create_qos_basic_policies)
+ server, valid_port = self._boot_vm_with_min_bw(
+ qos_policy_id=self.qos_policy_valid['id'])
+ self._assert_allocation_is_as_expected(server['id'],
+ [valid_port['id']])
+
+ self.servers_client.migrate_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+ # TODO(lajoskatona): Check that the allocations are ok for the
+ # migration?
+ self._assert_allocation_is_as_expected(server['id'],
+ [valid_port['id']])
+
+ self.servers_client.confirm_resize_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status='ACTIVE', ready_wait=False, raise_on_error=True)
+ self._assert_allocation_is_as_expected(server['id'],
+ [valid_port['id']])
+
+ @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @utils.services('compute', 'network')
+ def test_resize_with_qos_min_bw_allocation(self):
+ """Scenario to resize VM with QoS min bw allocation in placement.
+
+ Boot a VM like in test_qos_min_bw_allocation_basic, do the same
+ checks, and
+ * resize the server with new flavor
+ * confirm the resize, if the VM state is VERIFY_RESIZE
+ * If the VM goes to ACTIVE state check that allocations are as
+ expected.
+ """
+ self._create_network_and_qos_policies(self._create_qos_basic_policies)
+ server, valid_port = self._boot_vm_with_min_bw(
+ qos_policy_id=self.qos_policy_valid['id'])
+ self._assert_allocation_is_as_expected(server['id'],
+ [valid_port['id']])
+
+ old_flavor = self.flavors_client.show_flavor(
+ CONF.compute.flavor_ref)['flavor']
+ new_flavor = self.flavors_client.create_flavor(**{
+ 'ram': old_flavor['ram'],
+ 'vcpus': old_flavor['vcpus'],
+ 'name': old_flavor['name'] + 'extra',
+ 'disk': old_flavor['disk'] + 1
+ })['flavor']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.flavors_client.delete_flavor, new_flavor['id'])
+
+ self.servers_client.resize_server(
+ server_id=server['id'], flavor_ref=new_flavor['id'])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+ # TODO(lajoskatona): Check that the allocations are ok for the
+ # migration?
+ self._assert_allocation_is_as_expected(server['id'],
+ [valid_port['id']])
+
+ self.servers_client.confirm_resize_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server['id'],
+ status='ACTIVE', ready_wait=False, raise_on_error=True)
+ self._assert_allocation_is_as_expected(server['id'],
+ [valid_port['id']])
+
+ @decorators.idempotent_id('79fdaa1c-df62-4738-a0f0-1cff9dc415f6')
+ @utils.services('compute', 'network')
+ def test_qos_min_bw_allocation_update_policy(self):
+ """Test the update of QoS policy on bound port
+
+ Related RFE in neutron: #1882804
+ The scenario is the following:
+ * Have a port with QoS policy and minimum bandwidth rule.
+ * Boot a VM with the port.
+ * Update the port with a new policy with different minimum bandwidth
+ values.
+ * The allocation on placement side should be according to the new
+ rules.
+ """
+ if not utils.is_network_feature_enabled('update_port_qos'):
+ raise self.skipException("update_port_qos feature is not enabled")
+
+ self._create_network_and_qos_policies(
+ self._create_qos_policies_from_life)
+
+ port = self.create_port(
+ self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': port['id']}])
+
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+ self.BANDWIDTH_1)
+
+ self.ports_client.update_port(
+ port['id'],
+ **{'qos_policy_id': self.qos_policy_2['id']})
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+ self.BANDWIDTH_2)
+
+ # Update the port back to the original policy
+ self.ports_client.update_port(
+ port['id'],
+ **{'qos_policy_id': self.qos_policy_1['id']})
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+ self.BANDWIDTH_1)
+
+ # Updating to an invalid policy should fail and keep the old policy
+ self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_not_valid',
+ min_kbps=self.PLACEMENT_MAX_INT)
+ port_orig = self.ports_client.show_port(port['id'])['port']
+ self.assertRaises(
+ lib_exc.Conflict,
+ self.ports_client.update_port,
+ port['id'], **{'qos_policy_id': self.qos_policy_not_valid['id']})
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+ self.BANDWIDTH_1)
+
+ port_upd = self.ports_client.show_port(port['id'])['port']
+ self.assertEqual(port_orig['qos_policy_id'],
+ port_upd['qos_policy_id'])
+ self.assertEqual(self.qos_policy_1['id'], port_upd['qos_policy_id'])
+
+ @decorators.idempotent_id('9cfc3bb8-f433-4c91-87b6-747cadc8958a')
+ @utils.services('compute', 'network')
+ def test_qos_min_bw_allocation_update_policy_from_zero(self):
+ """Test port without QoS policy to have QoS policy
+
+ This scenario checks if updating a port without QoS policy to
+ have QoS policy with minimum_bandwidth rule succeeds only on
+ controlplane, but placement allocation remains 0.
+ """
+ if not utils.is_network_feature_enabled('update_port_qos'):
+ raise self.skipException("update_port_qos feature is not enabled")
+
+ self._create_network_and_qos_policies(
+ self._create_qos_policies_from_life)
+
+ port = self.create_port(self.prov_network['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': port['id']}])
+
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+ self.ports_client.update_port(
+ port['id'], **{'qos_policy_id': self.qos_policy_2['id']})
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+ @decorators.idempotent_id('a9725a70-1d28-4e3b-ae0e-450abc235962')
+ @utils.services('compute', 'network')
+ def test_qos_min_bw_allocation_update_policy_to_zero(self):
+ """Test port with QoS policy to remove QoS policy
+
+ In this scenario port with QoS minimum_bandwidth rule update to
+ remove QoS policy results in 0 placement allocation.
+ """
+ if not utils.is_network_feature_enabled('update_port_qos'):
+ raise self.skipException("update_port_qos feature is not enabled")
+
+ self._create_network_and_qos_policies(
+ self._create_qos_policies_from_life)
+
+ port = self.create_port(
+ self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': port['id']}])
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+ self.BANDWIDTH_1)
+
+ self.ports_client.update_port(
+ port['id'],
+ **{'qos_policy_id': None})
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0)
+
+ @decorators.idempotent_id('756ced7f-6f1a-43e7-a851-2fcfc16f3dd7')
+ @utils.services('compute', 'network')
+ def test_qos_min_bw_allocation_update_with_multiple_ports(self):
+ if not utils.is_network_feature_enabled('update_port_qos'):
+ raise self.skipException("update_port_qos feature is not enabled")
+
+ self._create_network_and_qos_policies(
+ self._create_qos_policies_from_life)
+
+ port1 = self.create_port(
+ self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+ port2 = self.create_port(
+ self.prov_network['id'], qos_policy_id=self.qos_policy_2['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': port1['id']}, {'port': port2['id']}])
+ self._assert_allocation_is_as_expected(
+ server1['id'], [port1['id'], port2['id']],
+ self.BANDWIDTH_1 + self.BANDWIDTH_2)
+
+ self.ports_client.update_port(
+ port1['id'],
+ **{'qos_policy_id': self.qos_policy_2['id']})
+ self._assert_allocation_is_as_expected(
+ server1['id'], [port1['id'], port2['id']],
+ 2 * self.BANDWIDTH_2)
+
+ @decorators.idempotent_id('0805779e-e03c-44fb-900f-ce97a790653b')
+ @utils.services('compute', 'network')
+ def test_empty_update(self):
+ if not utils.is_network_feature_enabled('update_port_qos'):
+ raise self.skipException("update_port_qos feature is not enabled")
+
+ self._create_network_and_qos_policies(
+ self._create_qos_policies_from_life)
+
+ port = self.create_port(
+ self.prov_network['id'], qos_policy_id=self.qos_policy_1['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': port['id']}])
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+ self.BANDWIDTH_1)
+ self.ports_client.update_port(
+ port['id'],
+ **{'description': 'foo'})
+ self._assert_allocation_is_as_expected(server1['id'], [port['id']],
+ self.BANDWIDTH_1)
diff --git a/tempest/scenario/test_network_v6.py b/tempest/scenario/test_network_v6.py
index 14f24c7..9be28c4 100644
--- a/tempest/scenario/test_network_v6.py
+++ b/tempest/scenario/test_network_v6.py
@@ -218,7 +218,7 @@
guest_has_address,
CONF.validation.ping_timeout, 1, ssh, ip)
if not result:
- self._log_console_output(servers=[srv])
+ self.log_console_output(servers=[srv])
self.fail(
'Address %s not configured for instance %s, '
'ip address output is\n%s' %
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 3fc93e4..496a371 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -217,7 +217,7 @@
direction='ingress',
)
sec_group_rules_client = tenant.manager.security_group_rules_client
- self._create_security_group_rule(
+ self.create_security_group_rule(
secgroup=access_sg,
sec_group_rules_client=sec_group_rules_client,
**ssh_rule)
@@ -385,7 +385,7 @@
remote_group_id=tenant.security_groups['default']['id'],
direction='ingress'
)
- self._create_security_group_rule(
+ self.create_security_group_rule(
secgroup=tenant.security_groups['default'],
security_groups_client=tenant.manager.security_groups_client,
**ruleset
@@ -413,7 +413,7 @@
protocol = ruleset['protocol']
sec_group_rules_client = (
dest_tenant.manager.security_group_rules_client)
- self._create_security_group_rule(
+ self.create_security_group_rule(
secgroup=dest_tenant.security_groups['default'],
sec_group_rules_client=sec_group_rules_client,
**ruleset
@@ -429,7 +429,7 @@
# allow reverse traffic and check
sec_group_rules_client = (
source_tenant.manager.security_group_rules_client)
- self._create_security_group_rule(
+ self.create_security_group_rule(
secgroup=source_tenant.security_groups['default'],
sec_group_rules_client=sec_group_rules_client,
**ruleset
@@ -464,9 +464,9 @@
def _log_console_output_for_all_tenants(self):
for tenant in self.tenants.values():
client = tenant.manager.servers_client
- self._log_console_output(servers=tenant.servers, client=client)
+ self.log_console_output(servers=tenant.servers, client=client)
if tenant.access_point is not None:
- self._log_console_output(
+ self.log_console_output(
servers=[tenant.access_point], client=client)
def _create_protocol_ruleset(self, protocol, port=80):
@@ -543,7 +543,7 @@
direction='ingress',
)
sec_group_rules_client = new_tenant.manager.security_group_rules_client
- self._create_security_group_rule(
+ self.create_security_group_rule(
secgroup=new_sg,
sec_group_rules_client=sec_group_rules_client,
**icmp_rule)
@@ -596,7 +596,7 @@
protocol='icmp',
direction='ingress'
)
- self._create_security_group_rule(
+ self.create_security_group_rule(
secgroup=tenant.security_groups['default'],
**ruleset
)
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 8aa729b..990b325 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -37,7 +37,7 @@
@classmethod
def setup_credentials(cls):
- cls.set_network_resources()
+ cls.set_network_resources(network=True, subnet=True)
super(TestServerAdvancedOps, cls).setup_credentials()
@decorators.attr(type='slow')
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 02bc692..60242d5 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -67,7 +67,10 @@
def verify_metadata(self):
if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
# Verify metadata service
- md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
+ if CONF.network.public_network_id:
+ md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'
+ else:
+ md_url = 'http://169.254.169.254/latest/meta-data/local-ipv4'
def exec_cmd_and_verify_output():
cmd = 'curl ' + md_url
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index d6b6d14..ed06898 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -33,9 +33,18 @@
* shelve the instance
* unshelve the instance
* check the existence of the timestamp file in the unshelved instance
+ * check the existence of the timestamp file in the unshelved instance,
+ after a cold migrate
"""
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ def setup_clients(cls):
+ super(TestShelveInstance, cls).setup_clients()
+ cls.admin_servers_client = cls.os_admin.servers_client
+
@classmethod
def skip_checks(cls):
super(TestShelveInstance, cls).skip_checks()
@@ -50,7 +59,21 @@
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
- def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False):
+ def _cold_migrate_server(self, server):
+ src_host = self.get_host_for_server(server['id'])
+
+ self.admin_servers_client.migrate_server(server['id'])
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize_server(server['id'])
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ dst_host = self.get_host_for_server(server['id'])
+ self.assertNotEqual(src_host, dst_host)
+
+ def _create_server_then_shelve_and_unshelve(self, boot_from_volume=False,
+ cold_migrate=False):
keypair = self.create_keypair()
security_group = self._create_security_group()
@@ -71,6 +94,10 @@
# with the instance snapshot
self._shelve_then_unshelve_server(server)
+ if cold_migrate:
+ # Prevent bug #1732428 from coming back
+ self._cold_migrate_server(server)
+
timestamp2 = self.get_timestamp(instance_ip,
private_key=keypair['private_key'],
server=server)
@@ -91,3 +118,18 @@
@utils.services('compute', 'volume', 'network', 'image')
def test_shelve_volume_backed_instance(self):
self._create_server_then_shelve_and_unshelve(boot_from_volume=True)
+
+ @decorators.attr(type='slow')
+ @decorators.idempotent_id('1295fd9e-193a-4cf8-b211-55358e021bae')
+ @testtools.skipUnless(CONF.network.public_network_id,
+ 'The public_network_id option must be specified.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration not available.')
+ @testtools.skipUnless(CONF.compute_feature_enabled.shelve_migrate,
+ 'Shelve migrate not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @utils.services('compute', 'network', 'image')
+ def test_cold_migrate_unshelved_instance(self):
+ self._create_server_then_shelve_and_unshelve(cold_migrate=True)
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index c3b3670..a8e4c30 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
import testtools
from tempest.common import utils
@@ -24,7 +23,6 @@
from tempest.scenario import manager
CONF = config.CONF
-LOG = logging.getLogger(__name__)
class TestStampPattern(manager.ScenarioTest):
diff --git a/tempest/test.py b/tempest/test.py
index f383bc1..68602d6 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -38,12 +38,6 @@
CONF = config.CONF
-# TODO(oomichi): This test.idempotent_id should be removed after all projects
-# switch to use decorators.idempotent_id.
-idempotent_id = debtcollector.moves.moved_function(
- decorators.idempotent_id, 'idempotent_id', __name__,
- version='Mitaka', removal_version='?')
-
attr = debtcollector.moves.moved_function(
decorators.attr, 'attr', __name__,
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 5d9ddfa..ec7b760 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -29,10 +29,6 @@
from tempest.lib.common.utils import data_utils
from tempest.tests import base
-if six.PY2:
- # Python 2 has not FileNotFoundError exception
- FileNotFoundError = IOError
-
DEVNULL = open(os.devnull, 'wb')
atexit.register(DEVNULL.close)
@@ -72,6 +68,11 @@
class TestRunReturnCode(base.TestCase):
+
+ exclude_regex = '--exclude-regex'
+ exclude_list = '--exclude-list'
+ include_list = '--include-list'
+
def setUp(self):
super(TestRunReturnCode, self).setUp()
# Setup test dirs
@@ -96,6 +97,14 @@
self.addCleanup(os.chdir, os.path.abspath(os.curdir))
os.chdir(self.directory)
+ def _get_test_list_file(self, content):
+ fd, path = tempfile.mkstemp()
+ self.addCleanup(os.remove, path)
+ test_file = os.fdopen(fd, 'wb', 0)
+ self.addCleanup(test_file.close)
+ test_file.write(content.encode('utf-8'))
+ return path
+
def assertRunExit(self, cmd, expected):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@@ -119,19 +128,23 @@
subprocess.call(['stestr', 'init'])
self.assertRunExit(['tempest', 'run', '--regex', 'failing'], 1)
- def test_tempest_run_blackregex_failing(self):
- self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+ def test_tempest_run_exclude_regex_failing(self):
+ self.assertRunExit(['tempest', 'run',
+ self.exclude_regex, 'failing'], 0)
- def test_tempest_run_blackregex_failing_with_stestr_repository(self):
+ def test_tempest_run_exclude_regex_failing_with_stestr_repository(self):
subprocess.call(['stestr', 'init'])
- self.assertRunExit(['tempest', 'run', '--black-regex', 'failing'], 0)
+ self.assertRunExit(['tempest', 'run',
+ self.exclude_regex, 'failing'], 0)
- def test_tempest_run_blackregex_passing(self):
- self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+ def test_tempest_run_exclude_regex_passing(self):
+ self.assertRunExit(['tempest', 'run',
+ self.exclude_regex, 'passing'], 1)
- def test_tempest_run_blackregex_passing_with_stestr_repository(self):
+ def test_tempest_run_exclude_regex_passing_with_stestr_repository(self):
subprocess.call(['stestr', 'init'])
- self.assertRunExit(['tempest', 'run', '--black-regex', 'passing'], 1)
+ self.assertRunExit(['tempest', 'run',
+ self.exclude_regex, 'passing'], 1)
def test_tempest_run_fails(self):
self.assertRunExit(['tempest', 'run'], 1)
@@ -149,52 +162,35 @@
]
# NOTE(mtreinish): on python 3 the subprocess prints b'' around
# stdout.
- if six.PY3:
- result = ["b\'" + x + "\'" for x in result]
+ result = ["b\'" + x + "\'" for x in result]
self.assertEqual(result, tests)
def test_tempest_run_with_worker_file(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- worker_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(worker_file.close)
- worker_file.write(
- '- worker:\n - passing\n concurrency: 3'.encode('utf-8'))
+ path = self._get_test_list_file(
+ '- worker:\n - passing\n concurrency: 3')
self.assertRunExit(['tempest', 'run', '--worker-file=%s' % path], 0)
- def test_tempest_run_with_whitelist(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- whitelist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(whitelist_file.close)
- whitelist_file.write('passing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path], 0)
+ def test_tempest_run_with_include_list(self):
+ path = self._get_test_list_file('passing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.include_list, path)], 0)
- def test_tempest_run_with_whitelist_regex_include_pass_check_fail(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- whitelist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(whitelist_file.close)
- whitelist_file.write('passing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+ def test_tempest_run_with_include_regex_include_pass_check_fail(self):
+ path = self._get_test_list_file('passing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.include_list, path),
'--regex', 'fail'], 1)
- def test_tempest_run_with_whitelist_regex_include_pass_check_pass(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- whitelist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(whitelist_file.close)
- whitelist_file.write('passing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+ def test_tempest_run_with_include_regex_include_pass_check_pass(self):
+ path = self._get_test_list_file('passing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.include_list, path),
'--regex', 'passing'], 0)
- def test_tempest_run_with_whitelist_regex_include_fail_check_pass(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- whitelist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(whitelist_file.close)
- whitelist_file.write('failing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--whitelist-file=%s' % path,
+ def test_tempest_run_with_include_regex_include_fail_check_pass(self):
+ path = self._get_test_list_file('failing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.include_list, path),
'--regex', 'pass'], 1)
def test_tempest_run_passes_with_config_file(self):
@@ -202,50 +198,75 @@
'--config-file', self.stestr_conf_file,
'--regex', 'passing'], 0)
- def test_tempest_run_with_blacklist_failing(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- blacklist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(blacklist_file.close)
- blacklist_file.write('failing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 0)
+ def test_tempest_run_with_exclude_list_failing(self):
+ path = self._get_test_list_file('failing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.exclude_list, path)], 0)
- def test_tempest_run_with_blacklist_passing(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- blacklist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(blacklist_file.close)
- blacklist_file.write('passing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path], 1)
+ def test_tempest_run_with_exclude_list_passing(self):
+ path = self._get_test_list_file('passing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.exclude_list, path)], 1)
- def test_tempest_run_with_blacklist_regex_exclude_fail_check_pass(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- blacklist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(blacklist_file.close)
- blacklist_file.write('failing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ def test_tempest_run_with_exclude_list_regex_exclude_fail_check_pass(self):
+ path = self._get_test_list_file('failing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.exclude_list, path),
'--regex', 'pass'], 0)
- def test_tempest_run_with_blacklist_regex_exclude_pass_check_pass(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- blacklist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(blacklist_file.close)
- blacklist_file.write('passing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ def test_tempest_run_with_exclude_list_regex_exclude_pass_check_pass(self):
+ path = self._get_test_list_file('passing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.exclude_list, path),
'--regex', 'pass'], 1)
- def test_tempest_run_with_blacklist_regex_exclude_pass_check_fail(self):
- fd, path = tempfile.mkstemp()
- self.addCleanup(os.remove, path)
- blacklist_file = os.fdopen(fd, 'wb', 0)
- self.addCleanup(blacklist_file.close)
- blacklist_file.write('passing'.encode('utf-8'))
- self.assertRunExit(['tempest', 'run', '--blacklist-file=%s' % path,
+ def test_tempest_run_with_exclude_list_regex_exclude_pass_check_fail(self):
+ path = self._get_test_list_file('passing')
+ self.assertRunExit(['tempest', 'run',
+ '%s=%s' % (self.exclude_list, path),
'--regex', 'fail'], 1)
+class TestOldArgRunReturnCode(TestRunReturnCode):
+ """A class for testing deprecated but still supported args.
+
+ This class will be removed once we remove the following arguments:
+ * --black-regex
+ * --blacklist-file
+ * --whitelist-file
+ """
+ exclude_regex = '--black-regex'
+ exclude_list = '--blacklist-file'
+ include_list = '--whitelist-file'
+
+ def _test_args_passing(self, args):
+ self.assertRunExit(['tempest', 'run'] + args, 0)
+
+ def test_tempest_run_new_old_arg_comb(self):
+ path = self._get_test_list_file('failing')
+ self._test_args_passing(['--black-regex', 'failing',
+ '--exclude-regex', 'failing'])
+ self._test_args_passing(['--blacklist-file=' + path,
+ '--exclude-list=' + path])
+ path = self._get_test_list_file('passing')
+ self._test_args_passing(['--whitelist-file=' + path,
+ '--include-list=' + path])
+
+ def _test_args_passing_with_stestr_repository(self, args):
+ subprocess.call(['stestr', 'init'])
+ self.assertRunExit(['tempest', 'run'] + args, 0)
+
+ def test_tempest_run_new_old_arg_comb_with_stestr_repository(self):
+ path = self._get_test_list_file('failing')
+ self._test_args_passing_with_stestr_repository(
+ ['--black-regex', 'failing', '--exclude-regex', 'failing'])
+ self._test_args_passing_with_stestr_repository(
+ ['--blacklist-file=' + path, '--exclude-list=' + path])
+ path = self._get_test_list_file('passing')
+ self._test_args_passing_with_stestr_repository(
+ ['--whitelist-file=' + path, '--include-list=' + path])
+
+
class TestConfigPathCheck(base.TestCase):
def setUp(self):
super(TestConfigPathCheck, self).setUp()
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
index 0ef3742..374474d 100644
--- a/tempest/tests/common/test_credentials_factory.py
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -173,10 +173,15 @@
@mock.patch.object(cf, 'get_credentials')
def test_get_configured_admin_credentials(self, mock_get_credentials):
cfg.CONF.set_default('auth_version', 'v3', 'identity')
- all_params = [('admin_username', 'username', 'my_name'),
- ('admin_password', 'password', 'secret'),
- ('admin_project_name', 'project_name', 'my_pname'),
- ('admin_domain_name', 'domain_name', 'my_dname')]
+ all_params = [
+ ('admin_username', 'username', 'my_name'),
+ ('admin_user_domain_name', 'user_domain_name', 'my_dname'),
+ ('admin_password', 'password', 'secret'),
+ ('admin_project_name', 'project_name', 'my_pname'),
+ ('admin_project_domain_name', 'project_domain_name', 'my_dname'),
+ ('admin_domain_name', 'domain_name', 'my_dname'),
+ ('admin_system', 'system', None),
+ ]
expected_result = 'my_admin_credentials'
mock_get_credentials.return_value = expected_result
for config_item, _, value in all_params:
@@ -194,10 +199,15 @@
def test_get_configured_admin_credentials_not_fill_valid(
self, mock_get_credentials):
cfg.CONF.set_default('auth_version', 'v2', 'identity')
- all_params = [('admin_username', 'username', 'my_name'),
- ('admin_password', 'password', 'secret'),
- ('admin_project_name', 'project_name', 'my_pname'),
- ('admin_domain_name', 'domain_name', 'my_dname')]
+ all_params = [
+ ('admin_username', 'username', 'my_name'),
+ ('admin_user_domain_name', 'user_domain_name', 'my_dname'),
+ ('admin_password', 'password', 'secret'),
+ ('admin_project_domain_name', 'project_domain_name', 'my_dname'),
+ ('admin_project_name', 'project_name', 'my_pname'),
+ ('admin_domain_name', 'domain_name', 'my_dname'),
+ ('admin_system', 'system', None),
+ ]
expected_result = mock.Mock()
expected_result.is_valid.return_value = True
mock_get_credentials.return_value = expected_result
@@ -278,3 +288,20 @@
mock_auth_get_credentials.assert_called_once_with(
expected_uri, fill_in=False, identity_version='v3',
**expected_params)
+
+ @mock.patch('tempest.lib.auth.get_credentials')
+ def test_get_credentials_v3_system(self, mock_auth_get_credentials):
+ expected_uri = 'V3_URI'
+ expected_result = 'my_creds'
+ mock_auth_get_credentials.return_value = expected_result
+ cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
+ cfg.CONF.set_default('admin_system', 'all', 'auth')
+ params = {'system': 'all'}
+ expected_params = params.copy()
+ expected_params.update(config.service_client_config())
+ result = cf.get_credentials(fill_in=False, identity_version='v3',
+ **params)
+ self.assertEqual(expected_result, result)
+ mock_auth_get_credentials.assert_called_once_with(
+ expected_uri, fill_in=False, identity_version='v3',
+ **expected_params)
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index f03c7cc..d64d7b0 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -20,6 +20,7 @@
from tempest.common import waiters
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.compute import servers_client
from tempest.lib.services.volume.v2 import volumes_client
from tempest.tests import base
import tempest.tests.utils as utils
@@ -55,6 +56,70 @@
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
+ def test_wait_for_image_imported_to_stores(self):
+ self.client.show_image.return_value = ({'status': 'active',
+ 'stores': 'fake_store'})
+ start_time = int(time.time())
+ waiters.wait_for_image_imported_to_stores(
+ self.client, 'fake_image_id', 'fake_store')
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertLess((end_time - start_time), 10)
+
+ def test_wait_for_image_imported_to_stores_failure(self):
+ time_mock = self.patch('time.time')
+ client = mock.MagicMock()
+ client.build_timeout = 2
+ self.patch('time.time', side_effect=[0., 1., 2.])
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ client.show_image.return_value = ({
+ 'status': 'saving',
+ 'stores': 'fake_store',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ self.assertRaises(lib_exc.OtherRestClientException,
+ waiters.wait_for_image_imported_to_stores,
+ client, 'fake_image_id', 'fake_store')
+
+ def test_wait_for_image_imported_to_stores_timeout(self):
+ time_mock = self.patch('time.time')
+ client = mock.MagicMock()
+ client.build_timeout = 2
+ self.patch('time.time', side_effect=[0., 1., 2.])
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ client.show_image.return_value = ({
+ 'status': 'saving',
+ 'stores': 'fake_store'})
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_image_imported_to_stores,
+ client, 'fake_image_id', 'fake_store')
+
+ def test_wait_for_image_copied_to_stores(self):
+ self.client.show_image.return_value = ({
+ 'status': 'active',
+ 'os_glance_importing_to_stores': '',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ start_time = int(time.time())
+ waiters.wait_for_image_copied_to_stores(
+ self.client, 'fake_image_id')
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertLess((end_time - start_time), 10)
+
+ def test_wait_for_image_copied_to_stores_timeout(self):
+ time_mock = self.patch('time.time')
+ self.patch('time.time', side_effect=[0., 1.])
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ self.client.show_image.return_value = ({
+ 'status': 'active',
+ 'os_glance_importing_to_stores': 'processing',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_image_copied_to_stores,
+ self.client, 'fake_image_id')
+
class TestInterfaceWaiters(base.TestCase):
@@ -334,3 +399,54 @@
uuids.attachment_id)
# Assert that show volume is only called once before we return
show_volume.assert_called_once_with(uuids.volume_id)
+
+ def test_wait_for_volume_attachment_remove_from_server(self):
+ volume_attached = {
+ "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+ volume_not_attached = {"volumeAttachments": []}
+ mock_list_volume_attachments = mock.Mock(
+ side_effect=[volume_attached, volume_not_attached])
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ build_interval=1,
+ build_timeout=1,
+ list_volume_attachments=mock_list_volume_attachments)
+ self.patch(
+ 'time.time',
+ side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+ self.patch('time.sleep')
+
+ waiters.wait_for_volume_attachment_remove_from_server(
+ mock_client, uuids.server_id, uuids.volume_id)
+
+ # Assert that list_volume_attachments is called until the attachment is
+ # removed.
+ mock_list_volume_attachments.assert_has_calls([
+ mock.call(uuids.server_id),
+ mock.call(uuids.server_id)])
+
+ def test_wait_for_volume_attachment_remove_from_server_timeout(self):
+ volume_attached = {
+ "volumeAttachments": [{"volumeId": uuids.volume_id}]}
+ mock_list_volume_attachments = mock.Mock(
+ side_effect=[volume_attached, volume_attached])
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ build_interval=1,
+ build_timeout=1,
+ list_volume_attachments=mock_list_volume_attachments)
+ self.patch(
+ 'time.time',
+ side_effect=[0., 0.5, mock_client.build_timeout + 1.])
+ self.patch('time.sleep')
+
+ self.assertRaises(
+ lib_exc.TimeoutException,
+ waiters.wait_for_volume_attachment_remove_from_server,
+ mock_client, uuids.server_id, uuids.volume_id)
+
+ # Assert that list_volume_attachments is called until the attachment is
+ # removed.
+ mock_list_volume_attachments.assert_has_calls([
+ mock.call(uuids.server_id),
+ mock.call(uuids.server_id)])
diff --git a/tempest/tests/lib/common/test_cred_client.py b/tempest/tests/lib/common/test_cred_client.py
index 860a465..b99311c 100644
--- a/tempest/tests/lib/common/test_cred_client.py
+++ b/tempest/tests/lib/common/test_cred_client.py
@@ -43,6 +43,14 @@
self.projects_client.delete_tenant.assert_called_once_with(
'fake_id')
+ def test_get_credentials(self):
+ ret = self.creds_client.get_credentials(
+ {'name': 'some_user', 'id': 'fake_id'},
+ {'name': 'some_project', 'id': 'fake_id'},
+ 'password123')
+ self.assertEqual(ret.username, 'some_user')
+ self.assertEqual(ret.project_name, 'some_project')
+
class TestCredClientV3(base.TestCase):
def setUp(self):
@@ -53,7 +61,7 @@
self.roles_client = mock.MagicMock()
self.domains_client = mock.MagicMock()
self.domains_client.list_domains.return_value = {
- 'domains': [{'id': 'fake_domain_id'}]
+ 'domains': [{'id': 'fake_domain_id', 'name': 'some_domain'}]
}
self.creds_client = cred_client.V3CredsClient(self.identity_client,
self.projects_client,
@@ -75,3 +83,31 @@
self.creds_client.delete_project('fake_id')
self.projects_client.delete_project.assert_called_once_with(
'fake_id')
+
+ def test_get_credentials(self):
+ ret = self.creds_client.get_credentials(
+ {'name': 'some_user', 'id': 'fake_id'},
+ {'name': 'some_project', 'id': 'fake_id'},
+ 'password123')
+ self.assertEqual(ret.username, 'some_user')
+ self.assertEqual(ret.project_name, 'some_project')
+ self.assertIsNone(ret.system)
+ self.assertEqual(ret.domain_name, 'some_domain')
+ ret = self.creds_client.get_credentials(
+ {'name': 'some_user', 'id': 'fake_id'},
+ None,
+ 'password123',
+ domain={'name': 'another_domain', 'id': 'another_id'})
+ self.assertEqual(ret.username, 'some_user')
+ self.assertIsNone(ret.project_name)
+ self.assertIsNone(ret.system)
+ self.assertEqual(ret.domain_name, 'another_domain')
+ ret = self.creds_client.get_credentials(
+ {'name': 'some_user', 'id': 'fake_id'},
+ None,
+ 'password123',
+ system={'system': 'all'})
+ self.assertEqual(ret.username, 'some_user')
+ self.assertIsNone(ret.project_name)
+ self.assertEqual(ret.system, {'system': 'all'})
+ self.assertEqual(ret.domain_name, 'some_domain')
diff --git a/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py b/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py
new file mode 100644
index 0000000..964c51b
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_identity_providers_client.py
@@ -0,0 +1,142 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import identity_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestIdentityProvidersClient(base.BaseServiceTest):
+ FAKE_IDENTITY_PROVIDERS_INFO = {
+ "identity_providers": [
+ {
+ "domain_id": "FAKE_DOMAIN_ID",
+ "description": "FAKE IDENTITY PROVIDER",
+ "remote_ids": ["fake_id_1", "fake_id_2"],
+ "enabled": True,
+ "id": "FAKE_ID",
+ "links": {
+ "protocols": "http://example.com/identity/v3/" +
+ "OS-FEDERATION/identity_providers/" +
+ "FAKE_ID/protocols",
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "identity_providers/FAKE_ID"
+ }
+ }
+ ],
+ "links": {
+ "next": None,
+ "previous": None,
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "identity_providers"
+ }
+ }
+
+ FAKE_IDENTITY_PROVIDER_INFO = {
+ "identity_provider": {
+ "authorization_ttl": None,
+ "domain_id": "FAKE_DOMAIN_ID",
+ "description": "FAKE IDENTITY PROVIDER",
+ "remote_ids": ["fake_id_1", "fake_id_2"],
+ "enabled": True,
+ "id": "ACME",
+ "links": {
+ "protocols": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "identity_providers/FAKE_ID/protocols",
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "identity_providers/FAKE_ID"
+ }
+ }
+ }
+
+ def setUp(self):
+ super(TestIdentityProvidersClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = identity_providers_client.IdentityProvidersClient(
+ fake_auth, 'identity', 'regionOne')
+
+ def _test_register_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.register_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_IDENTITY_PROVIDER_INFO,
+ bytes_body,
+ identity_provider_id="FAKE_ID",
+ status=201)
+
+ def _test_list_identity_providers(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_identity_providers,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_IDENTITY_PROVIDERS_INFO,
+ bytes_body,
+ status=200)
+
+ def _test_get_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.get_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_IDENTITY_PROVIDER_INFO,
+ bytes_body,
+ identity_provider_id="FAKE_ID",
+ status=200)
+
+ def _test_delete_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.delete_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ bytes_body,
+ identity_provider_id="FAKE_ID",
+ status=204)
+
+ def _test_update_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ self.FAKE_IDENTITY_PROVIDER_INFO,
+ bytes_body,
+ identity_provider_id="FAKE_ID",
+ status=200)
+
+ def test_register_identity_provider_with_str_body(self):
+ self._test_register_identity_provider()
+
+ def test_register_identity_provider_with_bytes_body(self):
+ self._test_register_identity_provider(bytes_body=True)
+
+ def test_list_identity_providers_with_str_body(self):
+ self._test_list_identity_providers()
+
+ def test_list_identity_providers_with_bytes_body(self):
+ self._test_list_identity_providers(bytes_body=True)
+
+ def test_get_identity_provider_with_str_body(self):
+ self._test_get_identity_provider()
+
+ def test_get_identity_provider_with_bytes_body(self):
+ self._test_get_identity_provider(bytes_body=True)
+
+ def test_delete_identity_provider_with_str_body(self):
+ self._test_delete_identity_provider()
+
+ def test_delete_identity_provider_with_bytes_body(self):
+ self._test_delete_identity_provider(bytes_body=True)
+
+ def test_update_identity_provider_with_str_body(self):
+ self._test_update_identity_provider()
+
+ def test_update_identity_provider_with_bytes_body(self):
+ self._test_update_identity_provider(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_mappings_client.py b/tempest/tests/lib/services/identity/v3/test_mappings_client.py
new file mode 100644
index 0000000..845a3f9
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_mappings_client.py
@@ -0,0 +1,183 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import mappings_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestMappingsClient(base.BaseServiceTest):
+ FAKE_MAPPING_INFO = {
+ "mapping": {
+ "id": "fake123",
+ "links": {
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "mappings/fake123"
+ },
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ }
+ },
+ {
+ "group": {
+ "id": "0cd5e9"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "orgPersonType",
+ "not_any_of": [
+ "Contractor",
+ "Guest"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ }
+
+ FAKE_MAPPINGS_INFO = {
+ "links": {
+ "next": None,
+ "previous": None,
+ "self": "http://example.com/identity/v3/OS-FEDERATION/mappings"
+ },
+ "mappings": [
+ {
+ "id": "fake123",
+ "links": {
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "mappings/fake123"
+ },
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ }
+ },
+ {
+ "group": {
+ "id": "0cd5e9"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "orgPersonType",
+ "any_one_of": [
+ "Contractor",
+ "SubContractor"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestMappingsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = mappings_client.MappingsClient(
+ fake_auth, 'identity', 'regionOne')
+
+ def _test_create_mapping(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_mapping,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_MAPPING_INFO,
+ bytes_body,
+ mapping_id="fake123",
+ status=201)
+
+ def _test_get_mapping(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.get_mapping,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_MAPPING_INFO,
+ bytes_body,
+ mapping_id="fake123",
+ status=200)
+
+ def _test_update_mapping(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_mapping,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ self.FAKE_MAPPING_INFO,
+ bytes_body,
+ mapping_id="fake123",
+ status=200)
+
+ def _test_list_mappings(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_mappings,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_MAPPINGS_INFO,
+ bytes_body,
+ status=200)
+
+ def _test_delete_mapping(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.delete_mapping,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ bytes_body,
+ mapping_id="fake123",
+ status=204)
+
+ def test_create_mapping_with_str_body(self):
+ self._test_create_mapping()
+
+ def test_create_mapping_with_bytes_body(self):
+ self._test_create_mapping(bytes_body=True)
+
+ def test_get_mapping_with_str_body(self):
+ self._test_get_mapping()
+
+ def test_get_mapping_with_bytes_body(self):
+ self._test_get_mapping(bytes_body=True)
+
+ def test_update_mapping_with_str_body(self):
+ self._test_update_mapping()
+
+ def test_update_mapping_with_bytes_body(self):
+ self._test_update_mapping(bytes_body=True)
+
+ def test_list_mappings_with_str_body(self):
+ self._test_list_mappings()
+
+ def test_list_mappings_with_bytes_body(self):
+ self._test_list_mappings(bytes_body=True)
+
+ def test_delete_mapping_with_str_body(self):
+ self._test_delete_mapping()
+
+ def test_delete_mapping_with_bytes_body(self):
+ self._test_delete_mapping(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_protocols_client.py b/tempest/tests/lib/services/identity/v3/test_protocols_client.py
new file mode 100644
index 0000000..c1d04f4
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_protocols_client.py
@@ -0,0 +1,140 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import protocols_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestProtocolsClient(base.BaseServiceTest):
+ FAKE_PROTOCOLS_INFO = {
+ "links": {
+ "next": None,
+ "previous": None,
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "identity_providers/FAKE_ID/protocols"
+ },
+ "protocols": [
+ {
+ "id": "fake_id1",
+ "links": {
+ "identity_provider": "http://example.com/identity/v3/" +
+ "OS-FEDERATION/identity_providers/" +
+ "FAKE_ID",
+ "self": "http://example.com/identity/v3/OS-FEDERATION/"
+ "identity_providers/FAKE_ID/protocols/fake_id1"
+ },
+ "mapping_id": "fake123"
+ }
+ ]
+ }
+
+ FAKE_PROTOCOL_INFO = {
+ "protocol": {
+ "id": "fake_id1",
+ "links": {
+ "identity_provider": "http://example.com/identity/v3/OS-" +
+ "FEDERATION/identity_providers/FAKE_ID",
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "identity_providers/FAKE_ID/protocols/fake_id1"
+ },
+ "mapping_id": "fake123"
+ }
+ }
+
+ def setUp(self):
+ super(TestProtocolsClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = protocols_client.ProtocolsClient(
+ fake_auth, 'identity', 'regionOne')
+
+ def _test_add_protocol_to_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.add_protocol_to_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_PROTOCOL_INFO,
+ bytes_body,
+ idp_id="FAKE_ID",
+ protocol_id="fake_id1",
+ status=201)
+
+ def _test_list_protocols_of_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_protocols_of_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_PROTOCOLS_INFO,
+ bytes_body,
+ idp_id="FAKE_ID",
+ status=200)
+
+ def _test_get_protocol_for_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.get_protocol_for_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_PROTOCOL_INFO,
+ bytes_body,
+ idp_id="FAKE_ID",
+ protocol_id="fake_id1",
+ status=200)
+
+ def _test_update_mapping_for_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_mapping_for_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ self.FAKE_PROTOCOL_INFO,
+ bytes_body,
+ idp_id="FAKE_ID",
+ protocol_id="fake_id1",
+ status=200)
+
+ def _test_delete_protocol_from_identity_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.delete_protocol_from_identity_provider,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ bytes_body,
+ idp_id="FAKE_ID",
+ protocol_id="fake_id1",
+ status=204)
+
+ def test_add_protocol_to_identity_provider_with_str_body(self):
+ self._test_add_protocol_to_identity_provider()
+
+ def test_add_protocol_to_identity_provider_with_bytes_body(self):
+ self._test_add_protocol_to_identity_provider(bytes_body=True)
+
+ def test_list_protocols_of_identity_provider_with_str_body(self):
+ self._test_list_protocols_of_identity_provider()
+
+ def test_list_protocols_of_identity_provider_with_bytes_body(self):
+ self._test_list_protocols_of_identity_provider(bytes_body=True)
+
+ def test_get_protocol_for_identity_provider_with_str_body(self):
+ self._test_get_protocol_for_identity_provider()
+
+ def test_get_protocol_for_identity_provider_with_bytes_body(self):
+ self._test_get_protocol_for_identity_provider(bytes_body=True)
+
+ def test_update_mapping_for_identity_provider_with_str_body(self):
+ self._test_update_mapping_for_identity_provider()
+
+ def test_update_mapping_for_identity_provider_with_bytes_body(self):
+ self._test_update_mapping_for_identity_provider(bytes_body=True)
+
+ def test_delete_protocol_from_identity_provider_with_str_body(self):
+ self._test_delete_protocol_from_identity_provider()
+
+ def test_delete_protocol_from_identity_provider_with_bytes_body(self):
+ self._test_delete_protocol_from_identity_provider(bytes_body=False)
diff --git a/tempest/tests/lib/services/identity/v3/test_roles_client.py b/tempest/tests/lib/services/identity/v3/test_roles_client.py
index 8d6bb42..e963310 100644
--- a/tempest/tests/lib/services/identity/v3/test_roles_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_roles_client.py
@@ -225,6 +225,16 @@
role_id="1234",
status=204)
+ def _test_create_user_role_on_system(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_user_role_on_system,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ bytes_body,
+ user_id="123",
+ role_id="1234",
+ status=204)
+
def _test_list_user_roles_on_project(self, bytes_body=False):
self.check_service_client_function(
self.client.list_user_roles_on_project,
@@ -243,6 +253,14 @@
domain_id="b344506af7644f6794d9cb316600b020",
user_id="123")
+ def _test_list_user_roles_on_system(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_user_roles_on_system,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_ROLES,
+ bytes_body,
+ user_id="123")
+
def _test_create_group_role_on_project(self, bytes_body=False):
self.check_service_client_function(
self.client.create_group_role_on_project,
@@ -265,6 +283,16 @@
role_id="1234",
status=204)
+ def _test_create_group_role_on_system(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_group_role_on_system,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ {},
+ bytes_body,
+ group_id="123",
+ role_id="1234",
+ status=204)
+
def _test_list_group_roles_on_project(self, bytes_body=False):
self.check_service_client_function(
self.client.list_group_roles_on_project,
@@ -283,6 +311,15 @@
domain_id="b344506af7644f6794d9cb316600b020",
group_id="123")
+ def _test_list_group_roles_on_system(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_group_roles_on_system,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_ROLES,
+ bytes_body,
+ domain_id="b344506af7644f6794d9cb316600b020",
+ group_id="123")
+
def _test_create_role_inference_rule(self, bytes_body=False):
self.check_service_client_function(
self.client.create_role_inference_rule,
@@ -405,6 +442,15 @@
role_id="1234",
status=204)
+ def test_delete_role_from_user_on_system(self):
+ self.check_service_client_function(
+ self.client.delete_role_from_user_on_system,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ user_id="123",
+ role_id="1234",
+ status=204)
+
def test_delete_role_from_group_on_project(self):
self.check_service_client_function(
self.client.delete_role_from_group_on_project,
@@ -425,6 +471,15 @@
role_id="1234",
status=204)
+ def test_delete_role_from_group_on_system(self):
+ self.check_service_client_function(
+ self.client.delete_role_from_group_on_system,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ group_id="123",
+ role_id="1234",
+ status=204)
+
def test_check_user_role_existence_on_project(self):
self.check_service_client_function(
self.client.check_user_role_existence_on_project,
@@ -445,6 +500,15 @@
role_id="1234",
status=204)
+ def test_check_user_role_existence_on_system(self):
+ self.check_service_client_function(
+ self.client.check_user_role_existence_on_system,
+ 'tempest.lib.common.rest_client.RestClient.head',
+ {},
+ user_id="123",
+ role_id="1234",
+ status=204)
+
def test_check_role_from_group_on_project_existence(self):
self.check_service_client_function(
self.client.check_role_from_group_on_project_existence,
@@ -465,6 +529,15 @@
role_id="1234",
status=204)
+ def test_check_role_from_group_on_system_existence(self):
+ self.check_service_client_function(
+ self.client.check_role_from_group_on_system_existence,
+ 'tempest.lib.common.rest_client.RestClient.head',
+ {},
+ group_id="123",
+ role_id="1234",
+ status=204)
+
def test_create_role_inference_rule_with_str_body(self):
self._test_create_role_inference_rule()
diff --git a/tempest/tests/lib/services/identity/v3/test_service_providers_client.py b/tempest/tests/lib/services/identity/v3/test_service_providers_client.py
new file mode 100644
index 0000000..ec908bc
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_service_providers_client.py
@@ -0,0 +1,157 @@
+# Copyright 2020 Samsung Electronics Co., Ltd
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+from tempest.lib.services.identity.v3 import service_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestServiceProvidersClient(base.BaseServiceTest):
+ FAKE_SERVICE_PROVIDER_INFO = {
+ "service_provider": {
+ "auth_url": "https://example.com/identity/v3/OS-FEDERATION/" +
+ "identity_providers/FAKE_ID/protocols/fake_id1/auth",
+ "description": "Fake Service Provider",
+ "enabled": True,
+ "id": "FAKE_ID",
+ "links": {
+ "self": "https://example.com/identity/v3/OS-FEDERATION/" +
+ "service_providers/FAKE_ID"
+ },
+ "relay_state_prefix": "ss:mem:",
+ "sp_url": "https://example.com/identity/Shibboleth.sso/" +
+ "FAKE_ID1/ECP"
+ }
+ }
+
+ FAKE_SERVICE_PROVIDERS_INFO = {
+ "links": {
+ "next": None,
+ "previous": None,
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "service_providers"
+ },
+ "service_providers": [
+ {
+ "auth_url": "https://example.com/identity/v3/OS-FEDERATION/" +
+ "identity_providers/acme/protocols/saml2/auth",
+ "description": "Stores ACME identities",
+ "enabled": True,
+ "id": "ACME",
+ "links": {
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "service_providers/ACME"
+ },
+ "relay_state_prefix": "ss:mem:",
+ "sp_url": "https://example.com/identity/Shibboleth.sso/" +
+ "SAML2/ECP"
+ },
+ {
+ "auth_url": "https://other.example.com/identity/v3/" +
+ "OS-FEDERATION/identity_providers/acme/" +
+ "protocols/saml2/auth",
+ "description": "Stores contractor identities",
+ "enabled": False,
+ "id": "ACME-contractors",
+ "links": {
+ "self": "http://example.com/identity/v3/OS-FEDERATION/" +
+ "service_providers/ACME-contractors"
+ },
+ "relay_state_prefix": "ss:mem:",
+ "sp_url": "https://other.example.com/identity/Shibboleth" +
+ ".sso/SAML2/ECP"
+ }
+ ]
+ }
+
+ def setUp(self):
+ super(TestServiceProvidersClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = service_providers_client.ServiceProvidersClient(
+ fake_auth, 'identity', 'regionOne')
+
+ def _test_register_service_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.register_service_provider,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_SERVICE_PROVIDER_INFO,
+ bytes_body,
+ service_provider_id="FAKE_ID",
+ status=201)
+
+ def _test_list_service_providers(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_service_providers,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SERVICE_PROVIDERS_INFO,
+ bytes_body,
+ status=200)
+
+ def _test_get_service_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.get_service_provider,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SERVICE_PROVIDER_INFO,
+ bytes_body,
+ service_provider_id="FAKE_ID",
+ status=200)
+
+ def _test_delete_service_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.delete_service_provider,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ bytes_body,
+ service_provider_id="FAKE_ID",
+ status=204)
+
+ def _test_update_service_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_service_provider,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ self.FAKE_SERVICE_PROVIDER_INFO,
+ bytes_body,
+ service_provider_id="FAKE_ID",
+ status=200)
+
+ def test_register_service_provider_with_str_body(self):
+ self._test_register_service_provider()
+
+ def test_register_service_provider_with_bytes_body(self):
+ self._test_register_service_provider(bytes_body=True)
+
+ def test_list_service_providers_with_str_body(self):
+ self._test_list_service_providers()
+
+ def test_list_service_providers_with_bytes_body(self):
+ self._test_list_service_providers(bytes_body=True)
+
+ def test_get_service_provider_with_str_body(self):
+ self._test_get_service_provider()
+
+ def test_get_service_provider_with_bytes_body(self):
+ self._test_get_service_provider(bytes_body=True)
+
+ def test_delete_service_provider_with_str_body(self):
+ self._test_delete_service_provider()
+
+ def test_delete_service_provider_with_bytes_body(self):
+ self._test_delete_service_provider(bytes_body=True)
+
+ def test_update_service_provider_with_str_body(self):
+ self._test_update_service_provider()
+
+ def test_update_service_provider_with_bytes_body(self):
+ self._test_update_service_provider(bytes_body=True)
diff --git a/tempest/tests/lib/services/identity/v3/test_trusts_client.py b/tempest/tests/lib/services/identity/v3/test_trusts_client.py
index a1ca020..33dca7d 100644
--- a/tempest/tests/lib/services/identity/v3/test_trusts_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_trusts_client.py
@@ -94,6 +94,35 @@
}
}
+ FAKE_LIST_TRUSTS_ROLES = {
+ "roles": [
+ {
+ "id": "c1648e",
+ "links": {
+ "self": "http://example.com/identity/v3/roles/c1648e"
+ },
+ "name": "manager"
+ },
+ {
+ "id": "ed7b78",
+ "links": {
+ "self": "http://example.com/identity/v3/roles/ed7b78"
+ },
+ "name": "member"
+ }
+ ]
+ }
+
+ FAKE_TRUST_ROLE = {
+ "role": {
+ "id": "c1648e",
+ "links": {
+ "self": "http://example.com/identity/v3/roles/c1648e"
+ },
+ "name": "manager"
+ }
+ }
+
def setUp(self):
super(TestTrustsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -123,6 +152,43 @@
self.FAKE_LIST_TRUSTS,
bytes_body)
+ def _test_list_trust_roles(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_trust_roles,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_TRUSTS_ROLES,
+ bytes_body,
+ trust_id="1ff900")
+
+ def test_check_trust_role(self):
+ self.check_service_client_function(
+ self.client.check_trust_role,
+ 'tempest.lib.common.rest_client.RestClient.head',
+ {},
+ trust_id="1ff900",
+ role_id="ed7b78")
+
+ def _check_show_trust_role(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_trust_role,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_TRUST_ROLE,
+ bytes_body,
+ trust_id="1ff900",
+ role_id="ed7b78")
+
+ def test_list_trust_roles_with_str_body(self):
+ self._test_list_trust_roles()
+
+ def test_list_trust_roles_with_bytes_body(self):
+ self._test_list_trust_roles(bytes_body=True)
+
+ def test_check_show_trust_role_with_str_body(self):
+ self._check_show_trust_role()
+
+ def test_check_show_trust_role_with_bytes_body(self):
+ self._check_show_trust_role(bytes_body=True)
+
def test_create_trust_with_str_body(self):
self._test_create_trust()
diff --git a/tempest/tests/lib/services/object_storage/test_object_client.py b/tempest/tests/lib/services/object_storage/test_object_client.py
index c646d61..d6df243 100644
--- a/tempest/tests/lib/services/object_storage/test_object_client.py
+++ b/tempest/tests/lib/services/object_storage/test_object_client.py
@@ -31,15 +31,18 @@
self.object_client = object_client.ObjectClient(self.fake_auth,
'swift', 'region1')
- @mock.patch.object(object_client, '_create_connection')
+ @mock.patch('tempest.lib.services.object_storage.object_client.'
+ 'ObjectClient._create_connection')
def test_create_object_continue_no_data(self, mock_poc):
self._validate_create_object_continue(None, mock_poc)
- @mock.patch.object(object_client, '_create_connection')
+ @mock.patch('tempest.lib.services.object_storage.object_client.'
+ 'ObjectClient._create_connection')
def test_create_object_continue_with_data(self, mock_poc):
self._validate_create_object_continue('hello', mock_poc)
- @mock.patch.object(object_client, '_create_connection')
+ @mock.patch('tempest.lib.services.object_storage.object_client.'
+ 'ObjectClient._create_connection')
def test_create_continue_with_no_continue_received(self, mock_poc):
self._validate_create_object_continue('hello', mock_poc,
initial_status=201)
diff --git a/tempest/tests/lib/test_auth.py b/tempest/tests/lib/test_auth.py
index c3a792f..3edb122 100644
--- a/tempest/tests/lib/test_auth.py
+++ b/tempest/tests/lib/test_auth.py
@@ -786,6 +786,19 @@
self.assertIn(attr, auth_params.keys())
self.assertEqual(getattr(all_creds, attr), auth_params[attr])
+ def test_auth_parameters_with_system_scope(self):
+ all_creds = fake_credentials.FakeKeystoneV3AllCredentials()
+ self.auth_provider.credentials = all_creds
+ self.auth_provider.scope = 'system'
+ auth_params = self.auth_provider._auth_params()
+ self.assertNotIn('scope', auth_params.keys())
+ for attr in all_creds.get_init_attributes():
+ if attr.startswith('project_') or attr.startswith('domain_'):
+ self.assertNotIn(attr, auth_params.keys())
+ else:
+ self.assertIn(attr, auth_params.keys())
+ self.assertEqual(getattr(all_creds, attr), auth_params[attr])
+
class TestKeystoneV3Credentials(base.TestCase):
def testSetAttrUserDomain(self):
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 6018441..1889420 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -19,7 +19,6 @@
from tempest.common import utils
from tempest import config
from tempest import exceptions
-from tempest.lib.common.utils import data_utils
from tempest import test
from tempest.tests import base
from tempest.tests import fake_config
@@ -33,47 +32,6 @@
fake_config.FakePrivate)
-# NOTE: The test module is for tempest.test.idempotent_id.
-# After all projects switch to use decorators.idempotent_id,
-# we can remove tempest.test.idempotent_id as well as this
-# test module
-class TestIdempotentIdDecorator(BaseDecoratorsTest):
-
- def _test_helper(self, _id, **decorator_args):
- @test.idempotent_id(_id)
- def foo():
- """Docstring"""
- pass
-
- return foo
-
- def _test_helper_without_doc(self, _id, **decorator_args):
- @test.idempotent_id(_id)
- def foo():
- pass
-
- return foo
-
- def test_positive(self):
- _id = data_utils.rand_uuid()
- foo = self._test_helper(_id)
- self.assertIn('id-%s' % _id, getattr(foo, '__testtools_attrs'))
- self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
-
- def test_positive_without_doc(self):
- _id = data_utils.rand_uuid()
- foo = self._test_helper_without_doc(_id)
- self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
-
- def test_idempotent_id_not_str(self):
- _id = 42
- self.assertRaises(TypeError, self._test_helper, _id)
-
- def test_idempotent_id_not_valid_uuid(self):
- _id = '42'
- self.assertRaises(ValueError, self._test_helper, _id)
-
-
class TestServicesDecorator(BaseDecoratorsTest):
def _test_services_helper(self, *decorator_args):
class TestFoo(test.BaseTestCase):
diff --git a/tools/check_logs.py b/tools/check_logs.py
index de7e41d..7e191a0 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -56,39 +56,39 @@
's-proxy'])
-def process_files(file_specs, url_specs, whitelists):
+def process_files(file_specs, url_specs, allow_lists):
regexp = re.compile(r"^.* (ERROR|CRITICAL|TRACE) .*\[.*\-.*\]")
logs_with_errors = []
for (name, filename) in file_specs:
- whitelist = whitelists.get(name, [])
+ allow_list = allow_lists.get(name, [])
with open(filename) as content:
- if scan_content(content, regexp, whitelist):
+ if scan_content(content, regexp, allow_list):
logs_with_errors.append(name)
for (name, url) in url_specs:
- whitelist = whitelists.get(name, [])
+ allow_list = allow_lists.get(name, [])
req = urlreq.Request(url)
req.add_header('Accept-Encoding', 'gzip')
page = urlreq.urlopen(req)
buf = six.StringIO(page.read())
f = gzip.GzipFile(fileobj=buf)
- if scan_content(f.read().splitlines(), regexp, whitelist):
+ if scan_content(f.read().splitlines(), regexp, allow_list):
logs_with_errors.append(name)
return logs_with_errors
-def scan_content(content, regexp, whitelist):
+def scan_content(content, regexp, allow_list):
had_errors = False
for line in content:
if not line.startswith("Stderr:") and regexp.match(line):
- whitelisted = False
- for w in whitelist:
+ allowed = False
+ for w in allow_list:
pat = ".*%s.*%s.*" % (w['module'].replace('.', '\\.'),
w['message'])
if re.match(pat, line):
- whitelisted = True
+ allowed = True
break
- if not whitelisted or dump_all_errors:
- if not whitelisted:
+ if not allowed or dump_all_errors:
+ if not allowed:
had_errors = True
return had_errors
@@ -105,9 +105,9 @@
print("Must provide exactly one of -d or -u")
return 1
print("Checking logs...")
- WHITELIST_FILE = os.path.join(
+ ALLOW_LIST_FILE = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
- "etc", "whitelist.yaml")
+ "etc", "allow-list.yaml")
file_matcher = re.compile(r".*screen-([\w-]+)\.log")
files = []
@@ -132,17 +132,17 @@
if m:
urls_to_process.append((m.group(1), u))
- whitelists = {}
- with open(WHITELIST_FILE) as stream:
+ allow_lists = {}
+ with open(ALLOW_LIST_FILE) as stream:
loaded = yaml.safe_load(stream)
if loaded:
for (name, l) in six.iteritems(loaded):
for w in l:
assert 'module' in w, 'no module in %s' % name
assert 'message' in w, 'no message in %s' % name
- whitelists = loaded
+ allow_lists = loaded
logs_with_errors = process_files(files_to_process, urls_to_process,
- whitelists)
+ allow_lists)
failed = False
if logs_with_errors:
@@ -164,14 +164,14 @@
usage = """
-Find non-white-listed log errors in log files from a devstack-gate run.
+Find non-allow-listed log errors in log files from a devstack-gate run.
Log files will be searched for ERROR or CRITICAL messages. If any
-error messages do not match any of the whitelist entries contained in
-etc/whitelist.yaml, those messages will be printed to the console and
+error messages do not match any of the allow-list entries contained in
+etc/allow-list.yaml, those messages will be printed to the console and
failure will be returned. A file directory containing logs or a url to the
log files of an OpenStack gate job can be provided.
-The whitelist yaml looks like:
+The allow-list yaml looks like:
log-name:
- module: "a.b.c"
@@ -179,7 +179,7 @@
- module: "a.b.c"
message: "regexp"
-repeated for each log file with a whitelist.
+repeated for each log file with an allow-list.
"""
parser = argparse.ArgumentParser(description=usage)
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 618c388..1b5b369 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -32,9 +32,9 @@
# List of projects having tempest plugin stale or unmaintained for a long time
# (6 months or more)
-# TODO(masayukig): Some of these can be removed from BLACKLIST in the future
-# when the patches are merged.
-BLACKLIST = [
+# TODO(masayukig): Some of these can be removed from NON_ACTIVE_LIST in the
+# future when the patches are merged.
+NON_ACTIVE_LIST = [
'x/gce-api', # It looks gce-api doesn't support python3 yet.
'x/glare', # To avoid sanity-job failure
'x/group-based-policy', # It looks this doesn't support python3 yet.
@@ -52,8 +52,11 @@
'x/tap-as-a-service', # To avoid sanity-job failure
'x/valet', # https://review.opendev.org/#/c/638339/
'x/kingbird', # https://bugs.launchpad.net/kingbird/+bug/1869722
- # vmware-nsx is blacklisted since https://review.opendev.org/#/c/736952
+ # vmware-nsx is excluded since https://review.opendev.org/#/c/736952
'x/vmware-nsx-tempest-plugin',
+ # mogan is unmaintained now, remove from the list when this is merged:
+ # https://review.opendev.org/c/x/mogan/+/767718
+ 'x/mogan',
]
url = 'https://review.opendev.org/projects/'
@@ -86,10 +89,10 @@
False
-if len(sys.argv) > 1 and sys.argv[1] == 'blacklist':
- for black_plugin in BLACKLIST:
- print(black_plugin)
- # We just need BLACKLIST when we use this `blacklist` option.
+if len(sys.argv) > 1 and sys.argv[1] == 'nonactivelist':
+ for non_active_plugin in NON_ACTIVE_LIST:
+ print(non_active_plugin)
+ # We just need NON_ACTIVE_LIST when we use this `nonactivelist` option.
# So, this exits here.
sys.exit()
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index 33675ed..4430bbf 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -81,17 +81,17 @@
printf "\n\n"
-# Print BLACKLIST
-if [[ -r doc/source/data/tempest-blacklisted-plugins-registry.header ]]; then
- cat doc/source/data/tempest-blacklisted-plugins-registry.header
+# Print NON_ACTIVE_LIST
+if [[ -r doc/source/data/tempest-non-active-plugins-registry.header ]]; then
+ cat doc/source/data/tempest-non-active-plugins-registry.header
fi
-blacklist=$(python tools/generate-tempest-plugins-list.py blacklist)
-name_col_len=$(echo "${blacklist}" | wc -L)
+nonactivelist=$(python tools/generate-tempest-plugins-list.py nonactivelist)
+name_col_len=$(echo "${nonactivelist}" | wc -L)
name_col_len=$(( name_col_len + 20 ))
printf "\n\n"
-print_plugin_table "${blacklist}"
+print_plugin_table "${nonactivelist}"
printf "\n\n"
diff --git a/tools/tempest-integrated-gate-compute-blacklist.txt b/tools/tempest-integrated-gate-compute-exclude-list.txt
similarity index 60%
rename from tools/tempest-integrated-gate-compute-blacklist.txt
rename to tools/tempest-integrated-gate-compute-exclude-list.txt
index 2290751..8805262 100644
--- a/tools/tempest-integrated-gate-compute-blacklist.txt
+++ b/tools/tempest-integrated-gate-compute-exclude-list.txt
@@ -11,9 +11,3 @@
tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_basic_ops
tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
tempest.scenario.test_volume_backup_restore.TestVolumeBackupRestore.test_volume_backup_restore
-
-# Skip test scenario when creating second image from instance
-# https://bugs.launchpad.net/tripleo/+bug/1881592
-# The test is most likely wrong and may fail if the fists image is create quickly.
-# FIXME: Either fix the test so it won't race or consider if we should cover the scenario at all.
-tempest.api.compute.images.test_images_oneserver_negative.ImagesOneServerNegativeTestJSON.test_create_second_image_when_first_image_is_being_saved
diff --git a/tools/tempest-integrated-gate-networking-blacklist.txt b/tools/tempest-integrated-gate-networking-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-networking-blacklist.txt
rename to tools/tempest-integrated-gate-networking-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-object-storage-blacklist.txt b/tools/tempest-integrated-gate-object-storage-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-object-storage-blacklist.txt
rename to tools/tempest-integrated-gate-object-storage-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-placement-blacklist.txt b/tools/tempest-integrated-gate-placement-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-placement-blacklist.txt
rename to tools/tempest-integrated-gate-placement-exclude-list.txt
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-blacklist.txt
new file mode 120000
index 0000000..2d691f8
--- /dev/null
+++ b/tools/tempest-integrated-gate-storage-blacklist.txt
@@ -0,0 +1 @@
+tempest-integrated-gate-storage-exclude-list.txt
\ No newline at end of file
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-exclude-list.txt
similarity index 100%
rename from tools/tempest-integrated-gate-storage-blacklist.txt
rename to tools/tempest-integrated-gate-storage-exclude-list.txt
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index c983da9..106a9c6 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -44,7 +44,7 @@
# retrieve a list of projects having tempest plugins
PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
-BLACKLIST="$(python tools/generate-tempest-plugins-list.py blacklist)"
+NON_ACTIVE_LIST="$(python tools/generate-tempest-plugins-list.py nonactivelist)"
# Function to clone project using zuul-cloner or from git
function clone_project {
@@ -117,8 +117,8 @@
failed_plugin=''
# Perform sanity on all tempest plugin projects
for project in $PROJECT_LIST; do
- # Remove blacklisted tempest plugins
- if ! [[ `echo $BLACKLIST | grep -c $project ` -gt 0 ]]; then
+ # Remove non-active tempest plugins
+ if ! [[ `echo $NON_ACTIVE_LIST | grep -c $project ` -gt 0 ]]; then
plugin_sanity_check $project && passed_plugin+=", $project" || \
failed_plugin+="$project, " > $SANITY_DIR/$project.txt
fi
diff --git a/tox.ini b/tox.ini
index d8e059a..2315163 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
envlist = pep8,py36,py38,bashate,pip-check-reqs
-minversion = 3.1.1
+minversion = 3.18.0
skipsdist = True
ignore_basepython_conflict = True
@@ -26,7 +26,7 @@
passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
usedevelop = True
install_command = pip install {opts} {packages}
-whitelist_externals = *
+allowlist_externals = *
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
@@ -108,7 +108,7 @@
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag:
# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
-# FIXME: We can replace it with the `--black-regex` option to exclude tests now.
+# FIXME: We can replace it with the `--exclude-regex` option to exclude tests now.
commands =
find . -type f -name "*.pyc" -delete
tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' {posargs}
@@ -132,11 +132,11 @@
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
commands =
find . -type f -name "*.pyc" -delete
- tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-networking-blacklist.txt {posargs}
- tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-networking-blacklist.txt {posargs}
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-networking-exclude-list.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-networking-exclude-list.txt {posargs}
[testenv:integrated-compute]
envdir = .tox/tempest
@@ -145,11 +145,11 @@
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
commands =
find . -type f -name "*.pyc" -delete
- tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-compute-blacklist.txt {posargs}
- tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-compute-blacklist.txt {posargs}
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
[testenv:integrated-placement]
envdir = .tox/tempest
@@ -158,11 +158,11 @@
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
commands =
find . -type f -name "*.pyc" -delete
- tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-placement-blacklist.txt {posargs}
- tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-placement-blacklist.txt {posargs}
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-placement-exclude-list.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-placement-exclude-list.txt {posargs}
[testenv:integrated-storage]
envdir = .tox/tempest
@@ -171,11 +171,11 @@
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
commands =
find . -type f -name "*.pyc" -delete
- tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-storage-blacklist.txt {posargs}
- tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-storage-blacklist.txt {posargs}
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-storage-exclude-list.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-storage-exclude-list.txt {posargs}
[testenv:integrated-object-storage]
envdir = .tox/tempest
@@ -184,11 +184,11 @@
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag and
-# tests listed in blacklist file:
+# tests listed in exclude-list file:
commands =
find . -type f -name "*.pyc" -delete
- tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --blacklist_file ./tools/tempest-integrated-gate-object-storage-blacklist.txt {posargs}
- tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --blacklist_file ./tools/tempest-integrated-gate-object-storage-blacklist.txt {posargs}
+ tempest run --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.api)' --exclude-list ./tools/tempest-integrated-gate-object-storage-exclude-list.txt {posargs}
+ tempest run --combine --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.scenario)' --exclude-list ./tools/tempest-integrated-gate-object-storage-exclude-list.txt {posargs}
[testenv:full-serial]
envdir = .tox/tempest
@@ -198,7 +198,7 @@
deps = {[tempestenv]deps}
# The regex below is used to select which tests to run and exclude the slow tag:
# See the testrepository bug: https://bugs.launchpad.net/testrepository/+bug/1208610
-# FIXME: We can replace it with the `--black-regex` option to exclude tests now.
+# FIXME: We can replace it with the `--exclude-regex` option to exclude tests now.
commands =
find . -type f -name "*.pyc" -delete
tempest run --serial --regex '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario))' {posargs}
@@ -290,12 +290,12 @@
sphinx-apidoc -f -o doc/source/tests/volume tempest/api/volume
rm -rf doc/build
sphinx-build -W -b html doc/source doc/build/html
-whitelist_externals =
+allowlist_externals =
rm
[testenv:pdf-docs]
deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
rm
make
commands =
@@ -369,7 +369,7 @@
rm -rf releasenotes/build
sphinx-build -a -E -W -d releasenotes/build/doctrees \
-b html releasenotes/source releasenotes/build/html
-whitelist_externals = rm
+allowlist_externals = rm
[testenv:bashate]
# if you want to test out some changes you have made to bashate
@@ -377,7 +377,7 @@
# modified bashate tree
deps =
{env:BASHATE_INSTALL_PATH:bashate}
-whitelist_externals = bash
+allowlist_externals = bash
commands = bash -c "find {toxinidir}/tools \
-not \( -type d -name .?\* -prune \) \
-type f \
@@ -406,6 +406,6 @@
[testenv:plugin-sanity-check]
# perform tempest plugin sanity
-whitelist_externals = bash
+allowlist_externals = bash
commands =
bash tools/tempest-plugin-sanity.sh
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
new file mode 100644
index 0000000..3deb944
--- /dev/null
+++ b/zuul.d/base.yaml
@@ -0,0 +1,86 @@
+- job:
+ name: devstack-tempest
+ parent: devstack
+ description: |
+ Base Tempest job.
+
+ This Tempest job provides the base for both the single and multi-node
+ test setup. To run a multi-node test inherit from devstack-tempest and
+ set the nodeset to a multi-node one.
+ required-projects: &base_required-projects
+ - opendev.org/openstack/tempest
+ timeout: 7200
+ roles: &base_roles
+ - zuul: opendev.org/openstack/devstack
+ vars: &base_vars
+ devstack_services:
+ tempest: true
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ compute:
+ min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
+ test_results_stage_name: test_results
+ zuul_copy_output:
+ '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
+ '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
+ '{{ devstack_base_dir }}/tempest/tempest.log': logs
+ '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
+ '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
+ '{{ stage_dir }}/stackviz': logs
+ extensions_to_txt:
+ conf: true
+ log: true
+ yaml: true
+ yml: true
+ run: playbooks/devstack-tempest.yaml
+ post-run: playbooks/post-tempest.yaml
+
+- job:
+ name: devstack-tempest-ipv6
+ parent: devstack-ipv6
+ description: |
+ Base Tempest IPv6 job. This job is derived from 'devstack-ipv6'
+ which sets the IPv6-only setting for OpenStack services. As part of
+ run phase, this job will verify the IPv6 setting and check the services
+ endpoints and listen addresses are IPv6. Basically it will run the script
+ ./tool/verify-ipv6-only-deployments.sh
+
+ Child jobs of this job can run their own set of tests and can
+ add post-run playbooks to extend the IPv6 verification specific
+ to their deployed services.
+ Check the wiki page for more details about project jobs setup
+ - https://wiki.openstack.org/wiki/Goal-IPv6-only-deployments-and-testing
+ required-projects: *base_required-projects
+ timeout: 7200
+ roles: *base_roles
+ vars: *base_vars
+ run: playbooks/devstack-tempest-ipv6.yaml
+ post-run: playbooks/post-tempest.yaml
+
+- job:
+ name: tempest-multinode-full-base
+ parent: devstack-tempest
+ description: |
+ Base multinode integration test with Neutron networking and py27.
+ Former names for this job were:
+ * neutron-tempest-multinode-full
+ * legacy-tempest-dsvm-neutron-multinode-full
+ * gate-tempest-dsvm-neutron-multinode-full-ubuntu-xenial-nv
+ This job includes two nodes, controller / tempest plus a subnode, but
+ it can be used with different topologies, as long as a controller node
+ and a tempest one exist.
+ timeout: 10800
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ FORCE_CONFIG_DRIVE: false
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ LIVE_MIGRATION_AVAILABLE: true
+ USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
+ group-vars:
+ peers:
+ devstack_localrc:
+ NOVA_ALLOW_MOVE_TO_SAME_HOST: false
+ LIVE_MIGRATION_AVAILABLE: true
+ USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION: true
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
new file mode 100644
index 0000000..41b1fa4
--- /dev/null
+++ b/zuul.d/integrated-gate.yaml
@@ -0,0 +1,439 @@
+# NOTE(gmann): This file includes all integrated jobs definition which
+# are supposed to be run by Tempest and other projects as
+# integrated testing.
+- job:
+ name: tempest-all
+ parent: devstack-tempest
+ description: |
+ Integration test that runs all tests.
+ Former name for this job was:
+ * legacy-periodic-tempest-dsvm-all-master
+ vars:
+ tox_envlist: all
+ tempest_test_regex: tempest
+ # TODO(gmann): Enable File injection tests once nova bug is fixed
+ # https://bugs.launchpad.net/nova/+bug/1882421
+ # devstack_localrc:
+ # ENABLE_FILE_INJECTION: true
+
+- job:
+ name: tempest-ipv6-only
+ parent: devstack-tempest-ipv6
+ # This currently works from stable/pike on.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Integration test of IPv6-only deployments. This job runs
+ smoke and IPv6 related tests only. Basic idea is to test
+ whether OpenStack Services listen on IPv6 address or not.
+ timeout: 10800
+ vars:
+ tox_envlist: ipv6-only
+
+- job:
+ name: tempest-full
+ parent: devstack-tempest
+ # This currently works from stable/pike on.
+ # Before stable/pike, legacy version of tempest-full
+ # 'legacy-tempest-dsvm-neutron-full' run.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Base integration test with Neutron networking and py27.
+ This job is supposed to run until stable/train setup only.
+ If you are running it on stable/ussuri gate onwards for python2.7
+ coverage then you need to do override-checkout with any stable
+ branch less than or equal to stable/train.
+ Former names for this job were:
+ * legacy-tempest-dsvm-neutron-full
+ * gate-tempest-dsvm-neutron-full-ubuntu-xenial
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ ENABLE_FILE_INJECTION: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ USE_PYTHON3: False
+ devstack_services:
+ # NOTE(mriedem): Disable the cinder-backup service from tempest-full
+ # since tempest-full is in the integrated-gate project template but
+ # the backup tests do not really involve other services so they should
+ # be run in some more cinder-specific job, especially because the
+ # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
+ c-bak: false
+
+- job:
+ name: tempest-full-py3
+ parent: devstack-tempest
+ # This currently works from stable/pike on.
+ # Before stable/pike, legacy version of tempest-full
+ # 'legacy-tempest-dsvm-neutron-full' run.
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Base integration test with Neutron networking and py3.
+ Former names for this job were:
+ * legacy-tempest-dsvm-py35
+ * gate-tempest-dsvm-py35
+ required-projects:
+ - openstack/horizon
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ GLANCE_USE_IMPORT_WORKFLOW: True
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
+ devstack_services:
+ # Enable horizon so that we can run horizon test.
+ horizon: true
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ # NOTE(mriedem): Disable the cinder-backup service from
+ # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
+ # project template but the backup tests do not really involve other
+ # services so they should be run in some more cinder-specific job,
+ # especially because the tests fail at a high rate (see bugs 1483434,
+ # 1813217, 1745168)
+ c-bak: false
+ neutron-placement: true
+ neutron-qos: true
+
+- job:
+ name: tempest-integrated-networking
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for networking. This is subset of
+ 'tempest-full-py3' job and run only Neutron and Nova related tests.
+ This is meant to be run on neutron gate only.
+ vars:
+ tox_envlist: integrated-network
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-compute
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for compute. This is
+ subset of 'tempest-full-py3' job and run Nova, Neutron, Cinder (except backup tests)
+ and Glance related tests. This is meant to be run on Nova gate only.
+ vars:
+ tox_envlist: integrated-compute
+ tempest_black_regex: ""
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-placement
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for placement. This is
+ subset of 'tempest-full-py3' job and run Nova and Neutron
+ related tests. This is meant to be run on Placement gate only.
+ vars:
+ tox_envlist: integrated-placement
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ c-bak: false
+
+- job:
+ name: tempest-integrated-storage
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for image & block storage. This is
+ subset of 'tempest-full-py3' job and run Cinder, Glance, Swift and Nova
+ related tests. This is meant to be run on Cinder and Glance gate only.
+ vars:
+ tox_envlist: integrated-storage
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ ENABLE_VOLUME_MULTIATTACH: true
+ GLANCE_USE_IMPORT_WORKFLOW: True
+
+- job:
+ name: tempest-integrated-object-storage
+ parent: devstack-tempest
+ branches: ^(?!stable/ocata).*$
+ description: |
+ This job runs integration tests for object storage. This is
+ subset of 'tempest-full-py3' job and run Swift, Cinder and Glance
+ related tests. This is meant to be run on Swift gate only.
+ vars:
+ tox_envlist: integrated-object-storage
+ devstack_localrc:
+ # NOTE(gmann): swift is not ready on python3 yet and devstack
+ # install it on python2.7 only. But setting the USE_PYTHON3
+ # for future once swift is ready on py3.
+ USE_PYTHON3: true
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-focal
+ # This job runs on Focal from stable/victoria on.
+ branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-bionic
+ # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
+ # This job is prepared to make sure all stable branches from stable/stein till stable/train
+ # will keep running on bionic. This can be removed once stable/train is EOL.
+ branches:
+ - stable/stein
+ - stable/train
+ - stable/ussuri
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-multinode-full
+ parent: tempest-multinode-full-base
+ nodeset: openstack-two-node-xenial
+ # This job runs on Xenial and this is for stable/pike, stable/queens
+ # and stable/rocky. This job is prepared to make sure all stable branches
+ # before stable/stein will keep running on xenial. This job can be
+ # removed once stable/rocky is EOL.
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: tempest-multinode-full-py3
+ parent: tempest-multinode-full
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
+
+- job:
+ name: tempest-slow
+ parent: tempest-multinode-full
+ description: |
+ This multinode integration job will run all the tests tagged as slow.
+ It enables the lvm multibackend setup to cover few scenario tests.
+ This job will run only slow tests (API or Scenario) serially.
+
+ Former names for this job were:
+ * legacy-tempest-dsvm-neutron-scenario-multinode-lvm-multibackend
+ * tempest-scenario-multinode-lvm-multibackend
+ timeout: 10800
+ vars:
+ tox_envlist: slow-serial
+ devstack_localrc:
+ CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ tempest_concurrency: 2
+ group-vars:
+ # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+ # the controller and subnode prior to Rocky so we have to make sure the
+ # variable is set in both locations.
+ subnode:
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
+
+- job:
+ name: tempest-slow-py3
+ parent: tempest-slow
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+ group-vars:
+ subnode:
+ devstack_localrc:
+ USE_PYTHON3: true
+
+- job:
+ name: tempest-cinder-v2-api
+ parent: devstack-tempest
+ branches:
+ - master
+ description: |
+ This job runs the cinder API test against v2 endpoint.
+ vars:
+ tox_envlist: all
+ tempest_test_regex: api.*volume
+ devstack_localrc:
+ TEMPEST_VOLUME_TYPE: volumev2
+
+- job:
+ name: tempest-pg-full
+ parent: tempest-full-py3
+ description: |
+ Base integration test with Neutron networking and PostgreSQL.
+ Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
+ vars:
+ devstack_localrc:
+ # TODO(gmann): Enable File injection tests once nova bug is fixed
+ # https://bugs.launchpad.net/nova/+bug/1882421
+ # ENABLE_FILE_INJECTION: true
+ DATABASE_TYPE: postgresql
+
+- project-template:
+ name: integrated-gate-networking
+ description: |
+ Run the python3 Tempest network integration tests (Nova and Neutron related)
+ in check and gate for the neutron integrated gate. This is meant to be
+ run on neutron gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-networking
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-networking
+
+- project-template:
+ name: integrated-gate-compute
+ description: |
+ Run the python3 Tempest compute integration tests
+ (Nova, Neutron, Cinder and Glance related) in check and gate
+ for the Nova integrated gate. This is meant to be
+ run on Nova gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-compute
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-compute
+
+- project-template:
+ name: integrated-gate-placement
+ description: |
+ Run the python3 Tempest placement integration tests
+ (Nova and Neutron related) in check and gate
+ for the Placement integrated gate. This is meant to be
+ run on Placement gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-placement
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-placement
+
+- project-template:
+ name: integrated-gate-storage
+ description: |
+ Run the python3 Tempest image & block storage integration tests
+ (Cinder, Glance, Swift and Nova related) in check and gate
+ for the neutron integrated gate. This is meant to be
+ run on Cinder and Glance gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-storage
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-storage
+
+- project-template:
+ name: integrated-gate-object-storage
+ description: |
+ Run the python3 Tempest object storage integration tests
+ (Swift, Cinder and Glance related) in check and gate
+ for the swift integrated gate. This is meant to be
+ run on swift gate only.
+ check:
+ jobs:
+ - grenade
+ - tempest-integrated-object-storage
+ gate:
+ jobs:
+ - grenade
+ - tempest-integrated-object-storage
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
new file mode 100644
index 0000000..5dcd27f
--- /dev/null
+++ b/zuul.d/project.yaml
@@ -0,0 +1,143 @@
+- project:
+ templates:
+ - check-requirements
+ - integrated-gate-py3
+ - openstack-cover-jobs
+ - openstack-python3-victoria-jobs
+ - publish-openstack-docs-pti
+ - release-notes-jobs-python3
+ check:
+ jobs:
+ - devstack-tempest:
+ files:
+ - ^playbooks/
+ - ^roles/
+ - ^.zuul.yaml$
+ - devstack-tempest-ipv6:
+ voting: false
+ files:
+ - ^playbooks/
+ - ^roles/
+ - ^.zuul.yaml$
+ - tempest-full-parallel:
+ # Define list of irrelevant files to use everywhere else
+ irrelevant-files: &tempest-irrelevant-files
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
+ - ^tools/.*$
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
+ - tempest-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3-ipv6:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - glance-multistore-cinder-import:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-victoria-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-ussuri-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-train-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-multinode-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-tox-plugin-sanity-check:
+ irrelevant-files: &tempest-irrelevant-files-2
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tempest/hacking/.*$
+ - ^tempest/tests/.*$
+ - ^.coveragerc$
+ - ^.gitignore$
+ - ^.gitreview$
+ - ^.mailmap$
+ # tools/ is not here since this relies on a script in tools/.
+ - tempest-ipv6-only:
+ irrelevant-files: *tempest-irrelevant-files-2
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - nova-live-migration:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - devstack-plugin-ceph-tempest-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-grenade-multinode:
+ irrelevant-files: *tempest-irrelevant-files
+ - grenade:
+ irrelevant-files: *tempest-irrelevant-files
+ - puppet-openstack-integration-4-scenario001-tempest-centos-7:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - puppet-openstack-integration-4-scenario002-tempest-centos-7:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - puppet-openstack-integration-4-scenario003-tempest-centos-7:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - puppet-openstack-integration-4-scenario004-tempest-centos-7:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-tempest-dvr:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - interop-tempest-consistency:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-test-account-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-test-account-no-admin-py3:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - openstack-tox-bashate:
+ irrelevant-files: *tempest-irrelevant-files-2
+ gate:
+ jobs:
+ - tempest-slow-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-grenade-multinode:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ - grenade:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-ipv6-only:
+ irrelevant-files: *tempest-irrelevant-files-2
+ - devstack-plugin-ceph-tempest-py3:
+ irrelevant-files: *tempest-irrelevant-files
+ experimental:
+ jobs:
+ - tempest-cinder-v2-api:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-all:
+ irrelevant-files: *tempest-irrelevant-files
+ - neutron-tempest-dvr-ha-multinode-full:
+ irrelevant-files: *tempest-irrelevant-files
+ - nova-tempest-v2-api:
+ irrelevant-files: *tempest-irrelevant-files
+ - cinder-tempest-lvm-multibackend:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-pg-full:
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-py3-opensuse15:
+ irrelevant-files: *tempest-irrelevant-files
+ periodic-stable:
+ jobs:
+ - tempest-full-victoria-py3
+ - tempest-full-ussuri-py3
+ - tempest-full-train-py3
+ periodic:
+ jobs:
+ - tempest-all
+ - tempest-full-oslo-master
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
new file mode 100644
index 0000000..769b280
--- /dev/null
+++ b/zuul.d/stable-jobs.yaml
@@ -0,0 +1,17 @@
+# NOTE(gmann): This file includes all stable release jobs definition.
+- job:
+ name: tempest-full-victoria-py3
+ parent: tempest-full-py3
+ override-checkout: stable/victoria
+
+- job:
+ name: tempest-full-ussuri-py3
+ parent: tempest-full-py3
+ nodeset: openstack-single-node-bionic
+ override-checkout: stable/ussuri
+
+- job:
+ name: tempest-full-train-py3
+ parent: tempest-full-py3
+ nodeset: openstack-single-node-bionic
+ override-checkout: stable/train
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
new file mode 100644
index 0000000..387a94b
--- /dev/null
+++ b/zuul.d/tempest-specific.yaml
@@ -0,0 +1,113 @@
+# NOTE(gmann): This file includes all tempest specific jobs definition which
+# are supposed to be run by Tempest gate only.
+- job:
+ name: tempest-full-oslo-master
+ parent: tempest-full-py3
+ description: |
+ Integration test using current git of oslo libs.
+ This ensures that when oslo libs get released that they
+ do not break OpenStack server projects.
+
+ Former name for this job was
+ periodic-tempest-dsvm-oslo-latest-full-master.
+ timeout: 10800
+ required-projects:
+ - opendev.org/openstack/oslo.cache
+ - opendev.org/openstack/oslo.concurrency
+ - opendev.org/openstack/oslo.config
+ - opendev.org/openstack/oslo.context
+ - opendev.org/openstack/oslo.db
+ - opendev.org/openstack/oslo.i18n
+ - opendev.org/openstack/oslo.log
+ - opendev.org/openstack/oslo.messaging
+ - opendev.org/openstack/oslo.middleware
+ - opendev.org/openstack/oslo.policy
+ - opendev.org/openstack/oslo.privsep
+ - opendev.org/openstack/oslo.reports
+ - opendev.org/openstack/oslo.rootwrap
+ - opendev.org/openstack/oslo.serialization
+ - opendev.org/openstack/oslo.service
+ - opendev.org/openstack/oslo.utils
+ - opendev.org/openstack/oslo.versionedobjects
+ - opendev.org/openstack/oslo.vmware
+
+- job:
+ name: tempest-full-parallel
+ parent: tempest-full-py3
+ voting: false
+ branches:
+ - master
+ description: |
+ Base integration test with Neutron networking.
+ It includes all scenarios as it was in the past.
+ This job runs all scenario tests in parallel!
+ timeout: 9000
+ vars:
+ tox_envlist: full-parallel
+ run_tempest_cleanup: true
+ run_tempest_dry_cleanup: true
+
+- job:
+ name: tempest-full-py3-ipv6
+ parent: devstack-tempest-ipv6
+ branches: ^(?!stable/ocata).*$
+ description: |
+ Base integration test with Neutron networking, IPv6 and py3.
+ vars:
+ tox_envlist: full
+ devstack_localrc:
+ USE_PYTHON3: true
+ FORCE_CONFIG_DRIVE: true
+ devstack_services:
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ # without Swift, c-bak cannot run (in the Gate at least)
+ c-bak: false
+
+- job:
+ name: tempest-full-py3-opensuse15
+ parent: tempest-full-py3
+ nodeset: devstack-single-node-opensuse-15
+ description: |
+ Base integration test with Neutron networking and py36 running
+ on openSUSE Leap 15.x
+ voting: false
+
+- job:
+ name: tempest-tox-plugin-sanity-check
+ parent: tox
+ description: |
+ Run tempest plugin sanity check script using tox.
+ nodeset: ubuntu-focal
+ vars:
+ tox_envlist: plugin-sanity-check
+ timeout: 5000
+
+- job:
+ name: tempest-full-test-account-py3
+ parent: tempest-full-py3
+ description: |
+ This job runs the full set of tempest tests using pre-provisioned
+ credentials instead of dynamic credentials and py3.
+ Former names for this job were:
+ - legacy-tempest-dsvm-full-test-accounts
+ - legacy-tempest-dsvm-neutron-full-test-accounts
+ - legacy-tempest-dsvm-identity-v3-test-accounts
+ vars:
+ devstack_localrc:
+ TEMPEST_USE_TEST_ACCOUNTS: True
+
+- job:
+ name: tempest-full-test-account-no-admin-py3
+ parent: tempest-full-test-account-py3
+ description: |
+ This job runs the full set of tempest tests using pre-provisioned
+ credentials and py3 without having an admin account.
+ Former name for this job was:
+ - legacy-tempest-dsvm-neutron-full-non-admin
+
+ vars:
+ devstack_localrc:
+ TEMPEST_HAS_ADMIN: False